repo_name (stringclasses, 6 values) | pr_number (int64, 512 to 78.9k) | pr_title (stringlengths, 3 to 144) | pr_description (stringlengths, 0 to 30.3k) | author (stringlengths, 2 to 21) | date_created (timestamp[ns, tz=UTC]) | date_merged (timestamp[ns, tz=UTC]) | previous_commit (stringlengths, 40) | pr_commit (stringlengths, 40) | query (stringlengths, 17 to 30.4k) | filepath (stringlengths, 9 to 210) | before_content (stringlengths, 0 to 112M) | after_content (stringlengths, 0 to 112M) | label (int64, -1 to 1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/tools/superpmi/superpmi-shim-collector/superpmi-shim-collector.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// SuperPMI-Shim-Collector.cpp - Shim that collects and yields .mc (method context) files.
//----------------------------------------------------------
#include "standardpch.h"
#include "icorjitcompiler.h"
#include "runtimedetails.h"
#include "errorhandling.h"
#include "logging.h"
#include "spmiutil.h"
#include "jithost.h"
// Assumptions:
// -We'll never be unloaded - we leak memory and have no facility to unload libraries
// -printf output to console is okay
HMODULE g_hRealJit = 0; // We leak this currently (could do the proper shutdown in process_detach)
WCHAR* g_realJitPath = nullptr; // We leak this (could do the proper shutdown in process_detach)
WCHAR* g_logPath = nullptr; // Again, we leak this one too...
WCHAR* g_dataFileName = nullptr; // We leak this
char* g_logFilePath = nullptr; // We *don't* leak this, hooray!
WCHAR* g_HomeDirectory = nullptr;
WCHAR* g_DefaultRealJitPath = nullptr;
MethodContext* g_globalContext = nullptr;
bool g_initialized = false;
void SetDefaultPaths()
{
if (g_HomeDirectory == nullptr)
{
g_HomeDirectory = GetEnvironmentVariableWithDefaultW(W("HOME"), W("."));
}
if (g_DefaultRealJitPath == nullptr)
{
size_t len = wcslen(g_HomeDirectory) + 1 + wcslen(DEFAULT_REAL_JIT_NAME_W) + 1;
g_DefaultRealJitPath = new WCHAR[len];
wcscpy_s(g_DefaultRealJitPath, len, g_HomeDirectory);
wcscat_s(g_DefaultRealJitPath, len, DIRECTORY_SEPARATOR_STR_W);
wcscat_s(g_DefaultRealJitPath, len, DEFAULT_REAL_JIT_NAME_W);
}
}
void SetLibName()
{
if (g_realJitPath == nullptr)
{
g_realJitPath = GetEnvironmentVariableWithDefaultW(W("SuperPMIShimPath"), g_DefaultRealJitPath);
}
}
void SetLogPath()
{
if (g_logPath == nullptr)
{
g_logPath = GetEnvironmentVariableWithDefaultW(W("SuperPMIShimLogPath"), g_HomeDirectory);
}
}
void SetLogPathName()
{
// NOTE: under PAL, we don't get the command line, so we depend on the random number generator to give us a unique
// filename
const WCHAR* fileName = GetCommandLineW();
const WCHAR* extension = W(".mc");
g_dataFileName = GetResultFileName(g_logPath, fileName, extension);
}
// TODO: this only works for ANSI file paths...
void SetLogFilePath()
{
if (g_logFilePath == nullptr)
{
// If the environment variable isn't set, we don't enable file logging
g_logFilePath = GetEnvironmentVariableWithDefaultA("SuperPMIShimLogFilePath", nullptr);
}
}
void InitializeShim()
{
if (g_initialized)
{
return;
}
#ifdef HOST_UNIX
if (0 != PAL_InitializeDLL())
{
fprintf(stderr, "Error: Fail to PAL_InitializeDLL\n");
exit(1);
}
#endif // HOST_UNIX
Logger::Initialize();
SetLogFilePath();
Logger::OpenLogFile(g_logFilePath);
g_initialized = true;
}
extern "C"
#ifdef HOST_UNIX
DLLEXPORT // For Win32 PAL LoadLibrary emulation
#endif
BOOL
DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved)
{
switch (ul_reason_for_call)
{
case DLL_PROCESS_ATTACH:
InitializeShim();
break;
case DLL_PROCESS_DETACH:
Logger::Shutdown();
delete[] g_logFilePath;
g_logFilePath = nullptr;
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
}
return TRUE;
}
extern "C" DLLEXPORT void jitStartup(ICorJitHost* host)
{
// crossgen2 doesn't invoke DllMain on Linux/Mac (under PAL), so optionally do initialization work here.
InitializeShim();
SetDefaultPaths();
SetLibName();
SetDebugDumpVariables();
if (!LoadRealJitLib(g_hRealJit, g_realJitPath))
{
return;
}
// Get the required entrypoint
PjitStartup pnjitStartup = (PjitStartup)::GetProcAddress(g_hRealJit, "jitStartup");
if (pnjitStartup == nullptr)
{
// This portion of the interface is not used by the JIT under test.
return;
}
g_globalContext = new MethodContext();
g_ourJitHost = new JitHost(host, g_globalContext);
pnjitStartup(g_ourJitHost);
}
extern "C" DLLEXPORT ICorJitCompiler* getJit()
{
DWORD dwRetVal = 0;
PgetJit pngetJit;
interceptor_ICJC* pJitInstance = nullptr;
ICorJitCompiler* tICJI = nullptr;
SetDefaultPaths();
SetLibName();
SetLogPath();
SetLogPathName();
SetDebugDumpVariables();
if (!LoadRealJitLib(g_hRealJit, g_realJitPath))
{
return nullptr;
}
// get the required entrypoints
pngetJit = (PgetJit)::GetProcAddress(g_hRealJit, "getJit");
if (pngetJit == 0)
{
LogError("getJit() - GetProcAddress 'getJit' failed (0x%08x)", ::GetLastError());
return nullptr;
}
tICJI = pngetJit();
if (tICJI == nullptr)
{
LogError("getJit() - pngetJit gave us null");
return nullptr;
}
pJitInstance = new interceptor_ICJC();
pJitInstance->original_ICorJitCompiler = tICJI;
#ifdef TARGET_WINDOWS
pJitInstance->currentOs = CORINFO_WINNT;
#elif defined(TARGET_OSX)
pJitInstance->currentOs = CORINFO_MACOS;
#elif defined(TARGET_UNIX)
pJitInstance->currentOs = CORINFO_UNIX;
#else
#error No target os defined
#endif
// create our datafile
pJitInstance->hFile = CreateFileW(g_dataFileName, GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN, NULL);
if (pJitInstance->hFile == INVALID_HANDLE_VALUE)
{
LogError("Couldn't open file '%ws': error %d", g_dataFileName, GetLastError());
}
return pJitInstance;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// SuperPMI-Shim-Collector.cpp - Shim that collects and yields .mc (method context) files.
//----------------------------------------------------------
#include "standardpch.h"
#include "icorjitcompiler.h"
#include "runtimedetails.h"
#include "errorhandling.h"
#include "logging.h"
#include "spmiutil.h"
#include "jithost.h"
// Assumptions:
// -We'll never be unloaded - we leak memory and have no facility to unload libraries
// -printf output to console is okay
HMODULE g_hRealJit = 0; // We leak this currently (could do the proper shutdown in process_detach)
WCHAR* g_realJitPath = nullptr; // We leak this (could do the proper shutdown in process_detach)
WCHAR* g_logPath = nullptr; // Again, we leak this one too...
WCHAR* g_dataFileName = nullptr; // We leak this
char* g_logFilePath = nullptr; // We *don't* leak this, hooray!
WCHAR* g_HomeDirectory = nullptr;
WCHAR* g_DefaultRealJitPath = nullptr;
MethodContext* g_globalContext = nullptr;
bool g_initialized = false;
void SetDefaultPaths()
{
if (g_HomeDirectory == nullptr)
{
g_HomeDirectory = GetEnvironmentVariableWithDefaultW(W("HOME"), W("."));
}
if (g_DefaultRealJitPath == nullptr)
{
size_t len = wcslen(g_HomeDirectory) + 1 + wcslen(DEFAULT_REAL_JIT_NAME_W) + 1;
g_DefaultRealJitPath = new WCHAR[len];
wcscpy_s(g_DefaultRealJitPath, len, g_HomeDirectory);
wcscat_s(g_DefaultRealJitPath, len, DIRECTORY_SEPARATOR_STR_W);
wcscat_s(g_DefaultRealJitPath, len, DEFAULT_REAL_JIT_NAME_W);
}
}
void SetLibName()
{
if (g_realJitPath == nullptr)
{
g_realJitPath = GetEnvironmentVariableWithDefaultW(W("SuperPMIShimPath"), g_DefaultRealJitPath);
}
}
void SetLogPath()
{
if (g_logPath == nullptr)
{
g_logPath = GetEnvironmentVariableWithDefaultW(W("SuperPMIShimLogPath"), g_HomeDirectory);
}
}
void SetLogPathName()
{
// NOTE: under PAL, we don't get the command line, so we depend on the random number generator to give us a unique
// filename
const WCHAR* fileName = GetCommandLineW();
const WCHAR* extension = W(".mc");
g_dataFileName = GetResultFileName(g_logPath, fileName, extension);
}
// TODO: this only works for ANSI file paths...
void SetLogFilePath()
{
if (g_logFilePath == nullptr)
{
// If the environment variable isn't set, we don't enable file logging
g_logFilePath = GetEnvironmentVariableWithDefaultA("SuperPMIShimLogFilePath", nullptr);
}
}
void InitializeShim()
{
if (g_initialized)
{
return;
}
#ifdef HOST_UNIX
if (0 != PAL_InitializeDLL())
{
fprintf(stderr, "Error: Fail to PAL_InitializeDLL\n");
exit(1);
}
#endif // HOST_UNIX
Logger::Initialize();
SetLogFilePath();
Logger::OpenLogFile(g_logFilePath);
g_initialized = true;
}
extern "C"
#ifdef HOST_UNIX
DLLEXPORT // For Win32 PAL LoadLibrary emulation
#endif
BOOL
DllMain(HMODULE hModule, DWORD ul_reason_for_call, LPVOID lpReserved)
{
switch (ul_reason_for_call)
{
case DLL_PROCESS_ATTACH:
InitializeShim();
break;
case DLL_PROCESS_DETACH:
Logger::Shutdown();
delete[] g_logFilePath;
g_logFilePath = nullptr;
break;
case DLL_THREAD_ATTACH:
case DLL_THREAD_DETACH:
break;
}
return TRUE;
}
extern "C" DLLEXPORT void jitStartup(ICorJitHost* host)
{
// crossgen2 doesn't invoke DllMain on Linux/Mac (under PAL), so optionally do initialization work here.
InitializeShim();
SetDefaultPaths();
SetLibName();
SetDebugDumpVariables();
if (!LoadRealJitLib(g_hRealJit, g_realJitPath))
{
return;
}
// Get the required entrypoint
PjitStartup pnjitStartup = (PjitStartup)::GetProcAddress(g_hRealJit, "jitStartup");
if (pnjitStartup == nullptr)
{
// This portion of the interface is not used by the JIT under test.
return;
}
g_globalContext = new MethodContext();
g_ourJitHost = new JitHost(host, g_globalContext);
pnjitStartup(g_ourJitHost);
}
extern "C" DLLEXPORT ICorJitCompiler* getJit()
{
DWORD dwRetVal = 0;
PgetJit pngetJit;
interceptor_ICJC* pJitInstance = nullptr;
ICorJitCompiler* tICJI = nullptr;
SetDefaultPaths();
SetLibName();
SetLogPath();
SetLogPathName();
SetDebugDumpVariables();
if (!LoadRealJitLib(g_hRealJit, g_realJitPath))
{
return nullptr;
}
// get the required entrypoints
pngetJit = (PgetJit)::GetProcAddress(g_hRealJit, "getJit");
if (pngetJit == 0)
{
LogError("getJit() - GetProcAddress 'getJit' failed (0x%08x)", ::GetLastError());
return nullptr;
}
tICJI = pngetJit();
if (tICJI == nullptr)
{
LogError("getJit() - pngetJit gave us null");
return nullptr;
}
pJitInstance = new interceptor_ICJC();
pJitInstance->original_ICorJitCompiler = tICJI;
#ifdef TARGET_WINDOWS
pJitInstance->currentOs = CORINFO_WINNT;
#elif defined(TARGET_OSX)
pJitInstance->currentOs = CORINFO_MACOS;
#elif defined(TARGET_UNIX)
pJitInstance->currentOs = CORINFO_UNIX;
#else
#error No target os defined
#endif
// create our datafile
pJitInstance->hFile = CreateFileW(g_dataFileName, GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS,
FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN, NULL);
if (pJitInstance->hFile == INVALID_HANDLE_VALUE)
{
LogError("Couldn't open file '%ws': error %d", g_dataFileName, GetLastError());
}
return pJitInstance;
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/inc/stacktrace.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
#ifndef __STACK_TRACE_H__
#define __STACK_TRACE_H__
HINSTANCE LoadImageHlp();
HINSTANCE LoadDbgHelp();
#include <specstrings.h>
//
//--- Constants ---------------------------------------------------------------
//
#define cchMaxAssertModuleLen 60
#define cchMaxAssertSymbolLen 257
#define cfrMaxAssertStackLevels 20
#define cchMaxAssertExprLen 257
#ifdef HOST_64BIT
#define cchMaxAssertStackLevelStringLen \
((3 * 8) + cchMaxAssertModuleLen + cchMaxAssertSymbolLen + 13)
// 3 addresses of at most 8 char, module, symbol, and the extra chars:
// 0x<address>: <module>! <symbol> + 0x<offset>\n
//FMT_ADDR_BARE is defined as "%08x`%08x" on Win64, and as
//"%08x" on 32 bit platforms. Hence the difference in the definitions.
#else
#define cchMaxAssertStackLevelStringLen \
((2 * 8) + cchMaxAssertModuleLen + cchMaxAssertSymbolLen + 12)
// 2 addresses of at most 8 char, module, symbol, and the extra chars:
// 0x<address>: <module>! <symbol> + 0x<offset>\n
#endif
//
//--- Prototypes --------------------------------------------------------------
//
/****************************************************************************
* MagicDeinit *
*-------------*
* Description:
* Cleans up for the symbol loading code. Should be called before
* exiting in order to free the dynamically loaded imagehlp.dll
******************************************************************** robch */
void MagicDeinit(void);
/****************************************************************************
* GetStringFromStackLevels *
*--------------------------*
* Description:
* Retrieves a string from the stack frame. If more than one frame, they
* are separated by newlines. Each frame appears in this format:
*
* 0x<address>: <module>! <symbol> + 0x<offset>
******************************************************************** robch */
void GetStringFromStackLevels(UINT ifrStart, UINT cfrTotal, _Out_writes_(cchMaxAssertStackLevelStringLen * cfrTotal) CHAR *pszString, struct _CONTEXT * pContext = NULL);
/****************************************************************************
* GetStringFromAddr *
*-------------------*
* Description:
* Builds a string from an address in the format:
*
* 0x<address>: <module>! <symbol> + 0x<offset>
******************************************************************** robch */
void GetStringFromAddr(DWORD_PTR dwAddr, _Out_writes_(cchMaxAssertStackLevelStringLen) LPSTR szString);
#if defined(HOST_X86) && !defined(TARGET_UNIX)
/****************************************************************************
* ClrCaptureContext *
*-------------------*
* Description:
* Exactly the contents of RtlCaptureContext for Win7 - Win2K doesn't
* support this, so we need it for CoreCLR 4, if we require Win2K support
****************************************************************************/
extern "C" void __stdcall ClrCaptureContext(_Out_ PCONTEXT ctx);
#else // HOST_X86 && !TARGET_UNIX
#define ClrCaptureContext RtlCaptureContext
#endif // HOST_X86 && !TARGET_UNIX
#endif
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
#ifndef __STACK_TRACE_H__
#define __STACK_TRACE_H__
HINSTANCE LoadImageHlp();
HINSTANCE LoadDbgHelp();
#include <specstrings.h>
//
//--- Constants ---------------------------------------------------------------
//
#define cchMaxAssertModuleLen 60
#define cchMaxAssertSymbolLen 257
#define cfrMaxAssertStackLevels 20
#define cchMaxAssertExprLen 257
#ifdef HOST_64BIT
#define cchMaxAssertStackLevelStringLen \
((3 * 8) + cchMaxAssertModuleLen + cchMaxAssertSymbolLen + 13)
// 3 addresses of at most 8 char, module, symbol, and the extra chars:
// 0x<address>: <module>! <symbol> + 0x<offset>\n
//FMT_ADDR_BARE is defined as "%08x`%08x" on Win64, and as
//"%08x" on 32 bit platforms. Hence the difference in the definitions.
#else
#define cchMaxAssertStackLevelStringLen \
((2 * 8) + cchMaxAssertModuleLen + cchMaxAssertSymbolLen + 12)
// 2 addresses of at most 8 char, module, symbol, and the extra chars:
// 0x<address>: <module>! <symbol> + 0x<offset>\n
#endif
//
//--- Prototypes --------------------------------------------------------------
//
/****************************************************************************
* MagicDeinit *
*-------------*
* Description:
* Cleans up for the symbol loading code. Should be called before
* exiting in order to free the dynamically loaded imagehlp.dll
******************************************************************** robch */
void MagicDeinit(void);
/****************************************************************************
* GetStringFromStackLevels *
*--------------------------*
* Description:
* Retrieves a string from the stack frame. If more than one frame, they
* are separated by newlines. Each frame appears in this format:
*
* 0x<address>: <module>! <symbol> + 0x<offset>
******************************************************************** robch */
void GetStringFromStackLevels(UINT ifrStart, UINT cfrTotal, _Out_writes_(cchMaxAssertStackLevelStringLen * cfrTotal) CHAR *pszString, struct _CONTEXT * pContext = NULL);
/****************************************************************************
* GetStringFromAddr *
*-------------------*
* Description:
* Builds a string from an address in the format:
*
* 0x<address>: <module>! <symbol> + 0x<offset>
******************************************************************** robch */
void GetStringFromAddr(DWORD_PTR dwAddr, _Out_writes_(cchMaxAssertStackLevelStringLen) LPSTR szString);
#if defined(HOST_X86) && !defined(TARGET_UNIX)
/****************************************************************************
* ClrCaptureContext *
*-------------------*
* Description:
* Exactly the contents of RtlCaptureContext for Win7 - Win2K doesn't
* support this, so we need it for CoreCLR 4, if we require Win2K support
****************************************************************************/
extern "C" void __stdcall ClrCaptureContext(_Out_ PCONTEXT ctx);
#else // HOST_X86 && !TARGET_UNIX
#define ClrCaptureContext RtlCaptureContext
#endif // HOST_X86 && !TARGET_UNIX
#endif
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/tools/superpmi/superpmi-shared/mclist.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// MCList.h - MethodContext List utility class
//----------------------------------------------------------
#ifndef _MCList
#define _MCList
#define MAXMCLFILESIZE 0xFFFFFF
class MCList
{
public:
static bool processArgAsMCL(char* input, int* count, int** list);
MCList()
{
// Initialize the static file handle
hMCLFile = INVALID_HANDLE_VALUE;
}
// Methods to create an MCL file
void InitializeMCL(char* filename);
void AddMethodToMCL(int methodIndex);
void CloseMCL();
private:
static bool getLineData(const char* nameOfInput, /* OUT */ int* pIndexCount, /* OUT */ int** pIndexes);
// File handle for MCL file
HANDLE hMCLFile;
};
#endif
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// MCList.h - MethodContext List utility class
//----------------------------------------------------------
#ifndef _MCList
#define _MCList
#define MAXMCLFILESIZE 0xFFFFFF
class MCList
{
public:
static bool processArgAsMCL(char* input, int* count, int** list);
MCList()
{
// Initialize the static file handle
hMCLFile = INVALID_HANDLE_VALUE;
}
// Methods to create an MCL file
void InitializeMCL(char* filename);
void AddMethodToMCL(int methodIndex);
void CloseMCL();
private:
static bool getLineData(const char* nameOfInput, /* OUT */ int* pIndexCount, /* OUT */ int** pIndexes);
// File handle for MCL file
HANDLE hMCLFile;
};
#endif
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/pal/tests/palsuite/miscellaneous/IsBadWritePtr/test3/test3.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source: test3.c
**
** Purpose:
** Check that IsBadWritePtr returns non-zero on Read-only memory.
**
**
**=========================================================*/
#include <palsuite.h>
PALTEST(miscellaneous_IsBadWritePtr_test3_paltest_isbadwriteptr_test3, "miscellaneous/IsBadWritePtr/test3/paltest_isbadwriteptr_test3")
{
LPVOID PageOne;
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
/* Reserve enough space for four pages. We'll commit this memory
and set the correct access for each page below.
*/
PageOne = VirtualAlloc(NULL,
GetOsPageSize(),
MEM_COMMIT,
PAGE_READONLY);
if(PageOne == NULL)
{
Fail("ERROR: VirtualAlloc failed to commit the required memory.\n");
}
if(IsBadWritePtr(PageOne,GetOsPageSize()) == 0)
{
VirtualFree(PageOne,0,MEM_RELEASE);
Fail("ERROR: IsBadWritePtr returned 0 when checking a section of "
"read-only memory. It should be non-zero.\n");
}
VirtualFree(PageOne,0,MEM_RELEASE);
PAL_Terminate();
return PASS;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================
**
** Source: test3.c
**
** Purpose:
** Check that IsBadWritePtr returns non-zero on Read-only memory.
**
**
**=========================================================*/
#include <palsuite.h>
PALTEST(miscellaneous_IsBadWritePtr_test3_paltest_isbadwriteptr_test3, "miscellaneous/IsBadWritePtr/test3/paltest_isbadwriteptr_test3")
{
LPVOID PageOne;
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
/* Reserve enough space for four pages. We'll commit this memory
and set the correct access for each page below.
*/
PageOne = VirtualAlloc(NULL,
GetOsPageSize(),
MEM_COMMIT,
PAGE_READONLY);
if(PageOne == NULL)
{
Fail("ERROR: VirtualAlloc failed to commit the required memory.\n");
}
if(IsBadWritePtr(PageOne,GetOsPageSize()) == 0)
{
VirtualFree(PageOne,0,MEM_RELEASE);
Fail("ERROR: IsBadWritePtr returned 0 when checking a section of "
"read-only memory. It should be non-zero.\n");
}
VirtualFree(PageOne,0,MEM_RELEASE);
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/native/libs/System.Globalization.Native/pal_localeStringData.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#pragma once
#include "pal_locale.h"
#include "pal_compiler.h"
// Enum that corresponds to managed enum CultureData.LocaleStringData.
// The numeric values of the enum members match their Win32 counterparts.
typedef enum
{
LocaleString_LocalizedDisplayName = 0x02,
LocaleString_EnglishDisplayName = 0x00000072,
LocaleString_NativeDisplayName = 0x00000073,
LocaleString_LocalizedLanguageName = 0x0000006f,
LocaleString_EnglishLanguageName = 0x00001001,
LocaleString_NativeLanguageName = 0x04,
LocaleString_EnglishCountryName = 0x00001002,
LocaleString_NativeCountryName = 0x08,
LocaleString_DecimalSeparator = 0x0E,
LocaleString_ThousandSeparator = 0x0F,
LocaleString_Digits = 0x00000013,
LocaleString_MonetarySymbol = 0x00000014,
LocaleString_CurrencyEnglishName = 0x00001007,
LocaleString_CurrencyNativeName = 0x00001008,
LocaleString_Iso4217MonetarySymbol = 0x00000015,
LocaleString_MonetaryDecimalSeparator = 0x00000016,
LocaleString_MonetaryThousandSeparator = 0x00000017,
LocaleString_AMDesignator = 0x00000028,
LocaleString_PMDesignator = 0x00000029,
LocaleString_PositiveSign = 0x00000050,
LocaleString_NegativeSign = 0x00000051,
LocaleString_Iso639LanguageTwoLetterName = 0x00000059,
LocaleString_Iso639LanguageThreeLetterName = 0x00000067,
LocaleString_Iso3166CountryName = 0x0000005A,
LocaleString_Iso3166CountryName2= 0x00000068,
LocaleString_NaNSymbol = 0x00000069,
LocaleString_PositiveInfinitySymbol = 0x0000006a,
LocaleString_ParentName = 0x0000006d,
LocaleString_PercentSymbol = 0x00000076,
LocaleString_PerMilleSymbol = 0x00000077
} LocaleStringData;
PALEXPORT int32_t GlobalizationNative_GetLocaleInfoString(const UChar* localeName,
LocaleStringData localeStringData,
UChar* value,
int32_t valueLength,
const UChar* uiLocaleName);
PALEXPORT int32_t GlobalizationNative_GetLocaleTimeFormat(const UChar* localeName,
int shortFormat, UChar* value,
int32_t valueLength);
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
#pragma once
#include "pal_locale.h"
#include "pal_compiler.h"
// Enum that corresponds to managed enum CultureData.LocaleStringData.
// The numeric values of the enum members match their Win32 counterparts.
typedef enum
{
LocaleString_LocalizedDisplayName = 0x02,
LocaleString_EnglishDisplayName = 0x00000072,
LocaleString_NativeDisplayName = 0x00000073,
LocaleString_LocalizedLanguageName = 0x0000006f,
LocaleString_EnglishLanguageName = 0x00001001,
LocaleString_NativeLanguageName = 0x04,
LocaleString_EnglishCountryName = 0x00001002,
LocaleString_NativeCountryName = 0x08,
LocaleString_DecimalSeparator = 0x0E,
LocaleString_ThousandSeparator = 0x0F,
LocaleString_Digits = 0x00000013,
LocaleString_MonetarySymbol = 0x00000014,
LocaleString_CurrencyEnglishName = 0x00001007,
LocaleString_CurrencyNativeName = 0x00001008,
LocaleString_Iso4217MonetarySymbol = 0x00000015,
LocaleString_MonetaryDecimalSeparator = 0x00000016,
LocaleString_MonetaryThousandSeparator = 0x00000017,
LocaleString_AMDesignator = 0x00000028,
LocaleString_PMDesignator = 0x00000029,
LocaleString_PositiveSign = 0x00000050,
LocaleString_NegativeSign = 0x00000051,
LocaleString_Iso639LanguageTwoLetterName = 0x00000059,
LocaleString_Iso639LanguageThreeLetterName = 0x00000067,
LocaleString_Iso3166CountryName = 0x0000005A,
LocaleString_Iso3166CountryName2= 0x00000068,
LocaleString_NaNSymbol = 0x00000069,
LocaleString_PositiveInfinitySymbol = 0x0000006a,
LocaleString_ParentName = 0x0000006d,
LocaleString_PercentSymbol = 0x00000076,
LocaleString_PerMilleSymbol = 0x00000077
} LocaleStringData;
PALEXPORT int32_t GlobalizationNative_GetLocaleInfoString(const UChar* localeName,
LocaleStringData localeStringData,
UChar* value,
int32_t valueLength,
const UChar* uiLocaleName);
PALEXPORT int32_t GlobalizationNative_GetLocaleTimeFormat(const UChar* localeName,
int shortFormat, UChar* value,
int32_t valueLength);
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/pal/src/safecrt/strlen_s.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/***
*strlen_s.c - contains strnlen() routine
*
*
*Purpose:
* strnlen returns the length of a null-terminated string,
* not including the null byte itself, up to the specified max size
*
*******************************************************************************/
#include <string.h>
#include <errno.h>
#include <limits.h>
#include "internal_securecrt.h"
#include "mbusafecrt_internal.h"
/***
*strnlen - return the length of a null-terminated string
*
*Purpose:
* Finds the length in bytes of the given string, not including
* the final null character. Only the first maxsize characters
* are inspected: if the null character is not found, maxsize is
* returned.
*
*Entry:
* const char * str - string whose length is to be computed
* size_t maxsize
*
*Exit:
* Length of the string "str", exclusive of the final null byte, or
* maxsize if the null character is not found.
*
*Exceptions:
*
*******************************************************************************/
size_t __cdecl PAL_strnlen(const char *str, size_t maxsize)
{
size_t n;
/* Note that we do not check if str == NULL, because we do not
* return errno_t...
*/
for (n = 0; n < maxsize && *str; n++, str++)
;
return n;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/***
*strlen_s.c - contains strnlen() routine
*
*
*Purpose:
* strnlen returns the length of a null-terminated string,
* not including the null byte itself, up to the specified max size
*
*******************************************************************************/
#include <string.h>
#include <errno.h>
#include <limits.h>
#include "internal_securecrt.h"
#include "mbusafecrt_internal.h"
/***
*strnlen - return the length of a null-terminated string
*
*Purpose:
* Finds the length in bytes of the given string, not including
* the final null character. Only the first maxsize characters
* are inspected: if the null character is not found, maxsize is
* returned.
*
*Entry:
* const char * str - string whose length is to be computed
* size_t maxsize
*
*Exit:
* Length of the string "str", exclusive of the final null byte, or
* maxsize if the null character is not found.
*
*Exceptions:
*
*******************************************************************************/
size_t __cdecl PAL_strnlen(const char *str, size_t maxsize)
{
size_t n;
/* Note that we do not check if str == NULL, because we do not
* return errno_t...
*/
for (n = 0; n < maxsize && *str; n++, str++)
;
return n;
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that doesn't work on Arm64, where we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/nativeaot/Runtime/GCHelpers.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// Unmanaged helpers exposed by the System.GC managed class.
//
#include "common.h"
#include "gcenv.h"
#include "gcenv.ee.h"
#include "gcheaputilities.h"
#include "RestrictedCallouts.h"
#include "gcrhinterface.h"
#include "PalRedhawkCommon.h"
#include "slist.h"
#include "varint.h"
#include "regdisplay.h"
#include "StackFrameIterator.h"
#include "thread.h"
#include "RWLock.h"
#include "threadstore.h"
#include "threadstore.inl"
#include "thread.inl"
EXTERN_C REDHAWK_API void __cdecl RhpCollect(uint32_t uGeneration, uint32_t uMode)
{
// This must be called via p/invoke rather than RuntimeImport to make the stack crawlable.
Thread * pCurThread = ThreadStore::GetCurrentThread();
pCurThread->SetupHackPInvokeTunnel();
pCurThread->DisablePreemptiveMode();
ASSERT(!pCurThread->IsDoNotTriggerGcSet());
GCHeapUtilities::GetGCHeap()->GarbageCollect(uGeneration, FALSE, uMode);
pCurThread->EnablePreemptiveMode();
}
EXTERN_C REDHAWK_API int64_t __cdecl RhpGetGcTotalMemory()
{
// This must be called via p/invoke rather than RuntimeImport to make the stack crawlable.
Thread * pCurThread = ThreadStore::GetCurrentThread();
pCurThread->SetupHackPInvokeTunnel();
pCurThread->DisablePreemptiveMode();
int64_t ret = GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse();
pCurThread->EnablePreemptiveMode();
return ret;
}
EXTERN_C REDHAWK_API int32_t __cdecl RhpStartNoGCRegion(int64_t totalSize, UInt32_BOOL hasLohSize, int64_t lohSize, UInt32_BOOL disallowFullBlockingGC)
{
Thread *pCurThread = ThreadStore::GetCurrentThread();
ASSERT(!pCurThread->IsCurrentThreadInCooperativeMode());
pCurThread->SetupHackPInvokeTunnel();
pCurThread->DisablePreemptiveMode();
int result = GCHeapUtilities::GetGCHeap()->StartNoGCRegion(totalSize, hasLohSize, lohSize, disallowFullBlockingGC);
pCurThread->EnablePreemptiveMode();
return result;
}
EXTERN_C REDHAWK_API int32_t __cdecl RhpEndNoGCRegion()
{
ASSERT(!ThreadStore::GetCurrentThread()->IsCurrentThreadInCooperativeMode());
return GCHeapUtilities::GetGCHeap()->EndNoGCRegion();
}
COOP_PINVOKE_HELPER(void, RhSuppressFinalize, (OBJECTREF refObj))
{
if (!refObj->get_EEType()->HasFinalizer())
return;
GCHeapUtilities::GetGCHeap()->SetFinalizationRun(refObj);
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhReRegisterForFinalize, (OBJECTREF refObj))
{
if (!refObj->get_EEType()->HasFinalizer())
FC_RETURN_BOOL(true);
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFinalization(-1, refObj));
}
COOP_PINVOKE_HELPER(int32_t, RhGetMaxGcGeneration, ())
{
return GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
}
COOP_PINVOKE_HELPER(int32_t, RhGetGcCollectionCount, (int32_t generation, CLR_BOOL getSpecialGCCount))
{
return GCHeapUtilities::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
}
COOP_PINVOKE_HELPER(int32_t, RhGetGeneration, (OBJECTREF obj))
{
return GCHeapUtilities::GetGCHeap()->WhichGeneration(obj);
}
COOP_PINVOKE_HELPER(int32_t, RhGetGcLatencyMode, ())
{
return GCHeapUtilities::GetGCHeap()->GetGcLatencyMode();
}
COOP_PINVOKE_HELPER(int32_t, RhSetGcLatencyMode, (int32_t newLatencyMode))
{
return GCHeapUtilities::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhIsServerGc, ())
{
FC_RETURN_BOOL(GCHeapUtilities::IsServerHeap());
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhRegisterGcCallout, (GcRestrictedCalloutKind eKind, void * pCallout))
{
FC_RETURN_BOOL(RestrictedCallouts::RegisterGcCallout(eKind, pCallout));
}
COOP_PINVOKE_HELPER(void, RhUnregisterGcCallout, (GcRestrictedCalloutKind eKind, void * pCallout))
{
RestrictedCallouts::UnregisterGcCallout(eKind, pCallout);
}
COOP_PINVOKE_HELPER(int32_t, RhGetLohCompactionMode, ())
{
return GCHeapUtilities::GetGCHeap()->GetLOHCompactionMode();
}
COOP_PINVOKE_HELPER(void, RhSetLohCompactionMode, (int32_t newLohCompactionMode))
{
GCHeapUtilities::GetGCHeap()->SetLOHCompactionMode(newLohCompactionMode);
}
COOP_PINVOKE_HELPER(int64_t, RhGetCurrentObjSize, ())
{
return GCHeapUtilities::GetGCHeap()->GetCurrentObjSize();
}
COOP_PINVOKE_HELPER(int64_t, RhGetGCNow, ())
{
return GCHeapUtilities::GetGCHeap()->GetNow();
}
COOP_PINVOKE_HELPER(int64_t, RhGetLastGCStartTime, (int32_t generation))
{
return GCHeapUtilities::GetGCHeap()->GetLastGCStartTime(generation);
}
COOP_PINVOKE_HELPER(int64_t, RhGetLastGCDuration, (int32_t generation))
{
return GCHeapUtilities::GetGCHeap()->GetLastGCDuration(generation);
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhRegisterForFullGCNotification, (int32_t maxGenerationThreshold, int32_t largeObjectHeapThreshold))
{
ASSERT(maxGenerationThreshold >= 1 && maxGenerationThreshold <= 99);
ASSERT(largeObjectHeapThreshold >= 1 && largeObjectHeapThreshold <= 99);
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFullGCNotification(maxGenerationThreshold, largeObjectHeapThreshold));
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhCancelFullGCNotification, ())
{
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->CancelFullGCNotification());
}
COOP_PINVOKE_HELPER(int32_t, RhWaitForFullGCApproach, (int32_t millisecondsTimeout))
{
ASSERT(millisecondsTimeout >= -1);
ASSERT(ThreadStore::GetCurrentThread()->IsCurrentThreadInCooperativeMode());
int timeout = millisecondsTimeout == -1 ? INFINITE : millisecondsTimeout;
return GCHeapUtilities::GetGCHeap()->WaitForFullGCApproach(millisecondsTimeout);
}
COOP_PINVOKE_HELPER(int32_t, RhWaitForFullGCComplete, (int32_t millisecondsTimeout))
{
ASSERT(millisecondsTimeout >= -1);
ASSERT(ThreadStore::GetCurrentThread()->IsCurrentThreadInCooperativeMode());
int timeout = millisecondsTimeout == -1 ? INFINITE : millisecondsTimeout;
return GCHeapUtilities::GetGCHeap()->WaitForFullGCComplete(millisecondsTimeout);
}
COOP_PINVOKE_HELPER(int64_t, RhGetGCSegmentSize, ())
{
size_t first = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(true);
size_t second = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(false);
return (first > second) ? first : second;
}
COOP_PINVOKE_HELPER(int64_t, RhGetAllocatedBytesForCurrentThread, ())
{
Thread *pThread = ThreadStore::GetCurrentThread();
gc_alloc_context *ac = pThread->GetAllocContext();
int64_t currentAllocated = ac->alloc_bytes + ac->alloc_bytes_uoh - (ac->alloc_limit - ac->alloc_ptr);
return currentAllocated;
}
struct RH_GC_GENERATION_INFO
{
uint64_t sizeBefore;
uint64_t fragmentationBefore;
uint64_t sizeAfter;
uint64_t fragmentationAfter;
};
struct RH_GH_MEMORY_INFO
{
public:
uint64_t highMemLoadThresholdBytes;
uint64_t totalAvailableMemoryBytes;
uint64_t lastRecordedMemLoadBytes;
uint64_t lastRecordedHeapSizeBytes;
uint64_t lastRecordedFragmentationBytes;
uint64_t totalCommittedBytes;
uint64_t promotedBytes;
uint64_t pinnedObjectCount;
uint64_t finalizationPendingCount;
uint64_t index;
uint32_t generation;
uint32_t pauseTimePercent;
uint8_t isCompaction;
uint8_t isConcurrent;
RH_GC_GENERATION_INFO generationInfo0;
RH_GC_GENERATION_INFO generationInfo1;
RH_GC_GENERATION_INFO generationInfo2;
RH_GC_GENERATION_INFO generationInfo3;
RH_GC_GENERATION_INFO generationInfo4;
uint64_t pauseDuration0;
uint64_t pauseDuration1;
};
COOP_PINVOKE_HELPER(void, RhGetMemoryInfo, (RH_GH_MEMORY_INFO* pData, int kind))
{
uint64_t* genInfoRaw = (uint64_t*)&(pData->generationInfo0);
uint64_t* pauseInfoRaw = (uint64_t*)&(pData->pauseDuration0);
return GCHeapUtilities::GetGCHeap()->GetMemoryInfo(
&(pData->highMemLoadThresholdBytes),
&(pData->totalAvailableMemoryBytes),
&(pData->lastRecordedMemLoadBytes),
&(pData->lastRecordedHeapSizeBytes),
&(pData->lastRecordedFragmentationBytes),
&(pData->totalCommittedBytes),
&(pData->promotedBytes),
&(pData->pinnedObjectCount),
&(pData->finalizationPendingCount),
&(pData->index),
&(pData->generation),
&(pData->pauseTimePercent),
(bool*)&(pData->isCompaction),
(bool*)&(pData->isConcurrent),
genInfoRaw,
pauseInfoRaw,
kind);
}
COOP_PINVOKE_HELPER(int64_t, RhGetTotalAllocatedBytes, ())
{
uint64_t allocated_bytes = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - RedhawkGCInterface::GetDeadThreadsNonAllocBytes();
// highest reported allocated_bytes. We do not want to report a value less than that even if unused_bytes has increased.
static uint64_t high_watermark;
uint64_t current_high = high_watermark;
while (allocated_bytes > current_high)
{
uint64_t orig = PalInterlockedCompareExchange64((int64_t*)&high_watermark, allocated_bytes, current_high);
if (orig == current_high)
return allocated_bytes;
current_high = orig;
}
return current_high;
}
EXTERN_C REDHAWK_API int64_t __cdecl RhGetTotalAllocatedBytesPrecise()
{
int64_t allocated;
// We need to suspend/restart the EE to get each thread's
// non-allocated memory from their allocation contexts
GCToEEInterface::SuspendEE(SUSPEND_REASON::SUSPEND_FOR_GC);
allocated = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - RedhawkGCInterface::GetDeadThreadsNonAllocBytes();
FOREACH_THREAD(pThread)
{
gc_alloc_context* ac = pThread->GetAllocContext();
allocated -= ac->alloc_limit - ac->alloc_ptr;
}
END_FOREACH_THREAD
GCToEEInterface::RestartEE(true);
return allocated;
}
extern Object* GcAllocInternal(MethodTable* pEEType, uint32_t uFlags, uintptr_t cbSize, Thread* pThread);
EXTERN_C REDHAWK_API void RhAllocateNewArray(MethodTable* pArrayEEType, uint32_t numElements, uint32_t flags, Array** pResult)
{
Thread* pThread = ThreadStore::GetCurrentThread();
pThread->SetupHackPInvokeTunnel();
pThread->DisablePreemptiveMode();
ASSERT(!pThread->IsDoNotTriggerGcSet());
*pResult = (Array*)GcAllocInternal(pArrayEEType, flags, numElements, pThread);
pThread->EnablePreemptiveMode();
}
EXTERN_C REDHAWK_API void RhAllocateNewObject(MethodTable* pEEType, uint32_t flags, Object** pResult)
{
Thread* pThread = ThreadStore::GetCurrentThread();
pThread->SetupHackPInvokeTunnel();
pThread->DisablePreemptiveMode();
ASSERT(!pThread->IsDoNotTriggerGcSet());
*pResult = GcAllocInternal(pEEType, flags, 0, pThread);
pThread->EnablePreemptiveMode();
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// Unmanaged helpers exposed by the System.GC managed class.
//
#include "common.h"
#include "gcenv.h"
#include "gcenv.ee.h"
#include "gcheaputilities.h"
#include "RestrictedCallouts.h"
#include "gcrhinterface.h"
#include "PalRedhawkCommon.h"
#include "slist.h"
#include "varint.h"
#include "regdisplay.h"
#include "StackFrameIterator.h"
#include "thread.h"
#include "RWLock.h"
#include "threadstore.h"
#include "threadstore.inl"
#include "thread.inl"
EXTERN_C REDHAWK_API void __cdecl RhpCollect(uint32_t uGeneration, uint32_t uMode)
{
// This must be called via p/invoke rather than RuntimeImport to make the stack crawlable.
Thread * pCurThread = ThreadStore::GetCurrentThread();
pCurThread->SetupHackPInvokeTunnel();
pCurThread->DisablePreemptiveMode();
ASSERT(!pCurThread->IsDoNotTriggerGcSet());
GCHeapUtilities::GetGCHeap()->GarbageCollect(uGeneration, FALSE, uMode);
pCurThread->EnablePreemptiveMode();
}
EXTERN_C REDHAWK_API int64_t __cdecl RhpGetGcTotalMemory()
{
// This must be called via p/invoke rather than RuntimeImport to make the stack crawlable.
Thread * pCurThread = ThreadStore::GetCurrentThread();
pCurThread->SetupHackPInvokeTunnel();
pCurThread->DisablePreemptiveMode();
int64_t ret = GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse();
pCurThread->EnablePreemptiveMode();
return ret;
}
EXTERN_C REDHAWK_API int32_t __cdecl RhpStartNoGCRegion(int64_t totalSize, UInt32_BOOL hasLohSize, int64_t lohSize, UInt32_BOOL disallowFullBlockingGC)
{
Thread *pCurThread = ThreadStore::GetCurrentThread();
ASSERT(!pCurThread->IsCurrentThreadInCooperativeMode());
pCurThread->SetupHackPInvokeTunnel();
pCurThread->DisablePreemptiveMode();
int result = GCHeapUtilities::GetGCHeap()->StartNoGCRegion(totalSize, hasLohSize, lohSize, disallowFullBlockingGC);
pCurThread->EnablePreemptiveMode();
return result;
}
EXTERN_C REDHAWK_API int32_t __cdecl RhpEndNoGCRegion()
{
ASSERT(!ThreadStore::GetCurrentThread()->IsCurrentThreadInCooperativeMode());
return GCHeapUtilities::GetGCHeap()->EndNoGCRegion();
}
COOP_PINVOKE_HELPER(void, RhSuppressFinalize, (OBJECTREF refObj))
{
if (!refObj->get_EEType()->HasFinalizer())
return;
GCHeapUtilities::GetGCHeap()->SetFinalizationRun(refObj);
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhReRegisterForFinalize, (OBJECTREF refObj))
{
if (!refObj->get_EEType()->HasFinalizer())
FC_RETURN_BOOL(true);
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFinalization(-1, refObj));
}
COOP_PINVOKE_HELPER(int32_t, RhGetMaxGcGeneration, ())
{
return GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
}
COOP_PINVOKE_HELPER(int32_t, RhGetGcCollectionCount, (int32_t generation, CLR_BOOL getSpecialGCCount))
{
return GCHeapUtilities::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
}
COOP_PINVOKE_HELPER(int32_t, RhGetGeneration, (OBJECTREF obj))
{
return GCHeapUtilities::GetGCHeap()->WhichGeneration(obj);
}
COOP_PINVOKE_HELPER(int32_t, RhGetGcLatencyMode, ())
{
return GCHeapUtilities::GetGCHeap()->GetGcLatencyMode();
}
COOP_PINVOKE_HELPER(int32_t, RhSetGcLatencyMode, (int32_t newLatencyMode))
{
return GCHeapUtilities::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhIsServerGc, ())
{
FC_RETURN_BOOL(GCHeapUtilities::IsServerHeap());
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhRegisterGcCallout, (GcRestrictedCalloutKind eKind, void * pCallout))
{
FC_RETURN_BOOL(RestrictedCallouts::RegisterGcCallout(eKind, pCallout));
}
COOP_PINVOKE_HELPER(void, RhUnregisterGcCallout, (GcRestrictedCalloutKind eKind, void * pCallout))
{
RestrictedCallouts::UnregisterGcCallout(eKind, pCallout);
}
COOP_PINVOKE_HELPER(int32_t, RhGetLohCompactionMode, ())
{
return GCHeapUtilities::GetGCHeap()->GetLOHCompactionMode();
}
COOP_PINVOKE_HELPER(void, RhSetLohCompactionMode, (int32_t newLohCompactionMode))
{
GCHeapUtilities::GetGCHeap()->SetLOHCompactionMode(newLohCompactionMode);
}
COOP_PINVOKE_HELPER(int64_t, RhGetCurrentObjSize, ())
{
return GCHeapUtilities::GetGCHeap()->GetCurrentObjSize();
}
COOP_PINVOKE_HELPER(int64_t, RhGetGCNow, ())
{
return GCHeapUtilities::GetGCHeap()->GetNow();
}
COOP_PINVOKE_HELPER(int64_t, RhGetLastGCStartTime, (int32_t generation))
{
return GCHeapUtilities::GetGCHeap()->GetLastGCStartTime(generation);
}
COOP_PINVOKE_HELPER(int64_t, RhGetLastGCDuration, (int32_t generation))
{
return GCHeapUtilities::GetGCHeap()->GetLastGCDuration(generation);
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhRegisterForFullGCNotification, (int32_t maxGenerationThreshold, int32_t largeObjectHeapThreshold))
{
ASSERT(maxGenerationThreshold >= 1 && maxGenerationThreshold <= 99);
ASSERT(largeObjectHeapThreshold >= 1 && largeObjectHeapThreshold <= 99);
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFullGCNotification(maxGenerationThreshold, largeObjectHeapThreshold));
}
COOP_PINVOKE_HELPER(FC_BOOL_RET, RhCancelFullGCNotification, ())
{
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->CancelFullGCNotification());
}
COOP_PINVOKE_HELPER(int32_t, RhWaitForFullGCApproach, (int32_t millisecondsTimeout))
{
ASSERT(millisecondsTimeout >= -1);
ASSERT(ThreadStore::GetCurrentThread()->IsCurrentThreadInCooperativeMode());
int timeout = millisecondsTimeout == -1 ? INFINITE : millisecondsTimeout;
return GCHeapUtilities::GetGCHeap()->WaitForFullGCApproach(millisecondsTimeout);
}
COOP_PINVOKE_HELPER(int32_t, RhWaitForFullGCComplete, (int32_t millisecondsTimeout))
{
ASSERT(millisecondsTimeout >= -1);
ASSERT(ThreadStore::GetCurrentThread()->IsCurrentThreadInCooperativeMode());
int timeout = millisecondsTimeout == -1 ? INFINITE : millisecondsTimeout;
return GCHeapUtilities::GetGCHeap()->WaitForFullGCComplete(millisecondsTimeout);
}
COOP_PINVOKE_HELPER(int64_t, RhGetGCSegmentSize, ())
{
size_t first = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(true);
size_t second = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize(false);
return (first > second) ? first : second;
}
COOP_PINVOKE_HELPER(int64_t, RhGetAllocatedBytesForCurrentThread, ())
{
Thread *pThread = ThreadStore::GetCurrentThread();
gc_alloc_context *ac = pThread->GetAllocContext();
int64_t currentAllocated = ac->alloc_bytes + ac->alloc_bytes_uoh - (ac->alloc_limit - ac->alloc_ptr);
return currentAllocated;
}
struct RH_GC_GENERATION_INFO
{
uint64_t sizeBefore;
uint64_t fragmentationBefore;
uint64_t sizeAfter;
uint64_t fragmentationAfter;
};
struct RH_GH_MEMORY_INFO
{
public:
uint64_t highMemLoadThresholdBytes;
uint64_t totalAvailableMemoryBytes;
uint64_t lastRecordedMemLoadBytes;
uint64_t lastRecordedHeapSizeBytes;
uint64_t lastRecordedFragmentationBytes;
uint64_t totalCommittedBytes;
uint64_t promotedBytes;
uint64_t pinnedObjectCount;
uint64_t finalizationPendingCount;
uint64_t index;
uint32_t generation;
uint32_t pauseTimePercent;
uint8_t isCompaction;
uint8_t isConcurrent;
RH_GC_GENERATION_INFO generationInfo0;
RH_GC_GENERATION_INFO generationInfo1;
RH_GC_GENERATION_INFO generationInfo2;
RH_GC_GENERATION_INFO generationInfo3;
RH_GC_GENERATION_INFO generationInfo4;
uint64_t pauseDuration0;
uint64_t pauseDuration1;
};
COOP_PINVOKE_HELPER(void, RhGetMemoryInfo, (RH_GH_MEMORY_INFO* pData, int kind))
{
uint64_t* genInfoRaw = (uint64_t*)&(pData->generationInfo0);
uint64_t* pauseInfoRaw = (uint64_t*)&(pData->pauseDuration0);
return GCHeapUtilities::GetGCHeap()->GetMemoryInfo(
&(pData->highMemLoadThresholdBytes),
&(pData->totalAvailableMemoryBytes),
&(pData->lastRecordedMemLoadBytes),
&(pData->lastRecordedHeapSizeBytes),
&(pData->lastRecordedFragmentationBytes),
&(pData->totalCommittedBytes),
&(pData->promotedBytes),
&(pData->pinnedObjectCount),
&(pData->finalizationPendingCount),
&(pData->index),
&(pData->generation),
&(pData->pauseTimePercent),
(bool*)&(pData->isCompaction),
(bool*)&(pData->isConcurrent),
genInfoRaw,
pauseInfoRaw,
kind);
}
COOP_PINVOKE_HELPER(int64_t, RhGetTotalAllocatedBytes, ())
{
uint64_t allocated_bytes = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - RedhawkGCInterface::GetDeadThreadsNonAllocBytes();
// highest reported allocated_bytes. We do not want to report a value less than that even if unused_bytes has increased.
static uint64_t high_watermark;
uint64_t current_high = high_watermark;
while (allocated_bytes > current_high)
{
uint64_t orig = PalInterlockedCompareExchange64((int64_t*)&high_watermark, allocated_bytes, current_high);
if (orig == current_high)
return allocated_bytes;
current_high = orig;
}
return current_high;
}
EXTERN_C REDHAWK_API int64_t __cdecl RhGetTotalAllocatedBytesPrecise()
{
int64_t allocated;
// We need to suspend/restart the EE to get each thread's
// non-allocated memory from their allocation contexts
GCToEEInterface::SuspendEE(SUSPEND_REASON::SUSPEND_FOR_GC);
allocated = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - RedhawkGCInterface::GetDeadThreadsNonAllocBytes();
FOREACH_THREAD(pThread)
{
gc_alloc_context* ac = pThread->GetAllocContext();
allocated -= ac->alloc_limit - ac->alloc_ptr;
}
END_FOREACH_THREAD
GCToEEInterface::RestartEE(true);
return allocated;
}
extern Object* GcAllocInternal(MethodTable* pEEType, uint32_t uFlags, uintptr_t cbSize, Thread* pThread);
EXTERN_C REDHAWK_API void RhAllocateNewArray(MethodTable* pArrayEEType, uint32_t numElements, uint32_t flags, Array** pResult)
{
Thread* pThread = ThreadStore::GetCurrentThread();
pThread->SetupHackPInvokeTunnel();
pThread->DisablePreemptiveMode();
ASSERT(!pThread->IsDoNotTriggerGcSet());
*pResult = (Array*)GcAllocInternal(pArrayEEType, flags, numElements, pThread);
pThread->EnablePreemptiveMode();
}
EXTERN_C REDHAWK_API void RhAllocateNewObject(MethodTable* pEEType, uint32_t flags, Object** pResult)
{
Thread* pThread = ThreadStore::GetCurrentThread();
pThread->SetupHackPInvokeTunnel();
pThread->DisablePreemptiveMode();
ASSERT(!pThread->IsDoNotTriggerGcSet());
*pResult = GcAllocInternal(pEEType, flags, 0, pThread);
pThread->EnablePreemptiveMode();
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
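For illustration only, here is a minimal sketch of the shape of the change rather than the actual JIT diff; apart from `featureSIMD` and `supportSIMDTypes()`, every name in it (the `JitSketch` type, the call site, and the way the helper is implemented) is hypothetical:

```cpp
#include <cstdio>

// Sketch only -- illustrative scaffolding, not the real compiler sources.
struct JitSketch
{
    bool featureSIMD; // tracks COMPlus_FeatureSIMD, so it can legitimately be false

    // Hypothetical helper: assumed to stay true on Arm64 regardless of the switch,
    // because vector/HFA ABI handling always needs SIMD types there.
    bool supportSIMDTypes() const
    {
#ifdef TARGET_ARM64
        return true;
#else
        return featureSIMD;
#endif
    }

    bool useSimdTypeForStruct() const
    {
        // Before the change, call sites read the raw flag:   return featureSIMD;
        // After the change, they go through the helper instead:
        return supportSIMDTypes();
    }
};

int main()
{
    JitSketch jit{ /* featureSIMD */ false };
    std::printf("SIMD types usable: %d\n", jit.useSimdTypeForStruct() ? 1 : 0);
    return 0;
}
```

The non-Arm64 branch above is an assumption made for the sketch; the PR text only states what Arm64 requires.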
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/jit64/hfa/main/testC/hfa_nd2C_r.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="hfa_testC.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\dll\common.csproj" />
<ProjectReference Include="..\dll\hfa_nested_f64_common.csproj" />
<ProjectReference Include="..\dll\hfa_nested_f64_interop_cpp.csproj" />
<CMakeProjectReference Include="..\dll\CMakelists.txt" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="hfa_testC.cs" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\dll\common.csproj" />
<ProjectReference Include="..\dll\hfa_nested_f64_common.csproj" />
<ProjectReference Include="..\dll\hfa_nested_f64_interop_cpp.csproj" />
<CMakeProjectReference Include="..\dll\CMakelists.txt" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/jit64/regress/asurt/143616/foo.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
public class foo
{
public static int Main()
{
return bar.getX();
}
}
public class bar
{
static bar()
{
System.Console.WriteLine(": Executing class constructor of bar.");
bar2.x = 100;
}
public static int getX()
{
int val = bar2.x;
System.Console.WriteLine("bar2.x contains: " + val);
return val;
}
}
public class bar2
{
static public int x;
static bar2()
{
System.Console.WriteLine(": Executing class constructor of bar2.");
bar2.x = -1;
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
public class foo
{
public static int Main()
{
return bar.getX();
}
}
public class bar
{
static bar()
{
System.Console.WriteLine(": Executing class constructor of bar.");
bar2.x = 100;
}
public static int getX()
{
int val = bar2.x;
System.Console.WriteLine("bar2.x contains: " + val);
return val;
}
}
public class bar2
{
static public int x;
static bar2()
{
System.Console.WriteLine(": Executing class constructor of bar2.");
bar2.x = -1;
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/libraries/System.ServiceModel.Syndication/tests/TestFeeds/RssFeeds/valid_managingEditor.xml
|
<!--
Description: managingEditor must include email address
Expect: ValidContact{parent:channel,element:managingEditor}
-->
<rss version="2.0">
<channel>
<title>Invalid webMaster</title>
<link>http://contoso.com/rss/2.0/</link>
<description>managingEditor must include email address</description>
<managingEditor>[email protected]</managingEditor>
</channel>
</rss>
|
<!--
Description: managingEditor must include email address
Expect: ValidContact{parent:channel,element:managingEditor}
-->
<rss version="2.0">
<channel>
<title>Invalid webMaster</title>
<link>http://contoso.com/rss/2.0/</link>
<description>managingEditor must include email address</description>
<managingEditor>[email protected]</managingEditor>
</channel>
</rss>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1075/Generated1075.ilproj
|
<Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated1075.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated1075.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/jit64/opt/rngchk/BadMatrixMul_o.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="BadMatrixMul.cs" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="BadMatrixMul.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/CodeGenBringUpTests/LeftShift_do.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="LeftShift.cs" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
<Optimize>True</Optimize>
</PropertyGroup>
<ItemGroup>
<Compile Include="LeftShift.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest226/Generated226.ilproj
|
<Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated226.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="Generated226.il" />
</ItemGroup>
<ItemGroup>
<ProjectReference Include="..\TestFramework\TestFramework.csproj" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/CompareLessThanOrEqual.Vector128.Single.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void CompareLessThanOrEqual_Vector128_Single()
{
var test = new SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Single[] inArray1, Single[] inArray2, Single[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Single> _fld1;
public Vector128<Single> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single testClass)
{
var result = AdvSimd.CompareLessThanOrEqual(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single testClass)
{
fixed (Vector128<Single>* pFld1 = &_fld1)
fixed (Vector128<Single>* pFld2 = &_fld2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pFld1)),
AdvSimd.LoadVector128((Single*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single);
private static Single[] _data1 = new Single[Op1ElementCount];
private static Single[] _data2 = new Single[Op2ElementCount];
private static Vector128<Single> _clsVar1;
private static Vector128<Single> _clsVar2;
private Vector128<Single> _fld1;
private Vector128<Single> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
}
public SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
_dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.CompareLessThanOrEqual(
Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Single*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.CompareLessThanOrEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.CompareLessThanOrEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Single*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Single*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.CompareLessThanOrEqual(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Single>* pClsVar1 = &_clsVar1)
fixed (Vector128<Single>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pClsVar1)),
AdvSimd.LoadVector128((Single*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr);
var result = AdvSimd.CompareLessThanOrEqual(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Single*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Single*)(_dataTable.inArray2Ptr));
var result = AdvSimd.CompareLessThanOrEqual(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single();
var result = AdvSimd.CompareLessThanOrEqual(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single();
fixed (Vector128<Single>* pFld1 = &test._fld1)
fixed (Vector128<Single>* pFld2 = &test._fld2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pFld1)),
AdvSimd.LoadVector128((Single*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.CompareLessThanOrEqual(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Single>* pFld1 = &_fld1)
fixed (Vector128<Single>* pFld2 = &_fld2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pFld1)),
AdvSimd.LoadVector128((Single*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.CompareLessThanOrEqual(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(&test._fld1)),
AdvSimd.LoadVector128((Single*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Single> op1, Vector128<Single> op2, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Single[] inArray2 = new Single[Op2ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Single[] inArray2 = new Single[Op2ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Single>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Single>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Single[] left, Single[] right, Single[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (BitConverter.SingleToInt32Bits(Helpers.CompareLessThanOrEqual(left[i], right[i])) != BitConverter.SingleToInt32Bits(result[i]))
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.CompareLessThanOrEqual)}<Single>(Vector128<Single>, Vector128<Single>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void CompareLessThanOrEqual_Vector128_Single()
{
var test = new SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Single[] inArray1, Single[] inArray2, Single[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Single>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Single>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Single, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Single, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Single> _fld1;
public Vector128<Single> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref testStruct._fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single testClass)
{
var result = AdvSimd.CompareLessThanOrEqual(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single testClass)
{
fixed (Vector128<Single>* pFld1 = &_fld1)
fixed (Vector128<Single>* pFld2 = &_fld2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pFld1)),
AdvSimd.LoadVector128((Single*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Single>>() / sizeof(Single);
private static Single[] _data1 = new Single[Op1ElementCount];
private static Single[] _data2 = new Single[Op2ElementCount];
private static Vector128<Single> _clsVar1;
private static Vector128<Single> _clsVar2;
private Vector128<Single> _fld1;
private Vector128<Single> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _clsVar2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
}
public SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld1), ref Unsafe.As<Single, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Single>, byte>(ref _fld2), ref Unsafe.As<Single, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Single>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetSingle(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSingle(); }
_dataTable = new DataTable(_data1, _data2, new Single[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.CompareLessThanOrEqual(
Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Single*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.CompareLessThanOrEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.CompareLessThanOrEqual), new Type[] { typeof(Vector128<Single>), typeof(Vector128<Single>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Single*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Single*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Single>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.CompareLessThanOrEqual(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Single>* pClsVar1 = &_clsVar1)
fixed (Vector128<Single>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pClsVar1)),
AdvSimd.LoadVector128((Single*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Single>>(_dataTable.inArray2Ptr);
var result = AdvSimd.CompareLessThanOrEqual(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Single*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Single*)(_dataTable.inArray2Ptr));
var result = AdvSimd.CompareLessThanOrEqual(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single();
var result = AdvSimd.CompareLessThanOrEqual(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__CompareLessThanOrEqual_Vector128_Single();
fixed (Vector128<Single>* pFld1 = &test._fld1)
fixed (Vector128<Single>* pFld2 = &test._fld2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pFld1)),
AdvSimd.LoadVector128((Single*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.CompareLessThanOrEqual(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Single>* pFld1 = &_fld1)
fixed (Vector128<Single>* pFld2 = &_fld2)
{
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(pFld1)),
AdvSimd.LoadVector128((Single*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.CompareLessThanOrEqual(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.CompareLessThanOrEqual(
AdvSimd.LoadVector128((Single*)(&test._fld1)),
AdvSimd.LoadVector128((Single*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Single> op1, Vector128<Single> op2, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Single[] inArray2 = new Single[Op2ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Single[] inArray1 = new Single[Op1ElementCount];
Single[] inArray2 = new Single[Op2ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Single>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Single>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Single>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Single[] left, Single[] right, Single[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (BitConverter.SingleToInt32Bits(Helpers.CompareLessThanOrEqual(left[i], right[i])) != BitConverter.SingleToInt32Bits(result[i]))
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.CompareLessThanOrEqual)}<Single>(Vector128<Single>, Vector128<Single>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/tools/aot/ILCompiler.Compiler/Compiler/DependencyAnalysis/CallingConventionConverterKey.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Text;
using Internal.TypeSystem;
namespace ILCompiler.DependencyAnalysis
{
public struct CallingConventionConverterKey : IEquatable<CallingConventionConverterKey>
{
public CallingConventionConverterKey(Internal.NativeFormat.CallingConventionConverterKind converterKind,
MethodSignature signature)
{
ConverterKind = converterKind;
Signature = signature;
}
public Internal.NativeFormat.CallingConventionConverterKind ConverterKind { get; }
public MethodSignature Signature { get; }
public override bool Equals(object obj)
{
return obj is CallingConventionConverterKey && Equals((CallingConventionConverterKey)obj);
}
public bool Equals(CallingConventionConverterKey other)
{
if (ConverterKind != other.ConverterKind)
return false;
if (!Signature.Equals(other.Signature))
return false;
return true;
}
public override int GetHashCode()
{
return Signature.GetHashCode() ^ (int)ConverterKind;
}
public string GetName()
{
return ConverterKind.ToString() + Signature.GetName();
}
}
public static class MethodSignatureExtensions
{
public static void AppendName(this MethodSignature signature, StringBuilder nameBuilder, UniqueTypeNameFormatter typeNameFormatter)
{
if (signature.GenericParameterCount > 0)
{
nameBuilder.Append("GenParams:");
nameBuilder.Append(signature.GenericParameterCount);
nameBuilder.Append(' ');
}
if (signature.IsStatic)
nameBuilder.Append("Static ");
typeNameFormatter.AppendName(nameBuilder, signature.ReturnType);
nameBuilder.Append('(');
for (int i = 0; i < signature.Length; i++)
{
if (i != 0)
nameBuilder.Append(',');
typeNameFormatter.AppendName(nameBuilder, signature[i]);
}
nameBuilder.Append(')');
}
public static string GetName(this MethodSignature signature)
{
StringBuilder nameBuilder = new StringBuilder();
signature.AppendName(nameBuilder, UniqueTypeNameFormatter.Instance);
return nameBuilder.ToString();
}
}
public class UniqueTypeNameFormatter : TypeNameFormatter
{
public static UniqueTypeNameFormatter Instance { get; } = new UniqueTypeNameFormatter();
public override void AppendName(StringBuilder sb, PointerType type)
{
AppendName(sb, type.ParameterType);
sb.Append('*');
}
public override void AppendName(StringBuilder sb, GenericParameterDesc type)
{
string prefix = type.Kind == GenericParameterKind.Type ? "!" : "!!";
sb.Append(prefix);
sb.Append(type.Name);
}
public override void AppendName(StringBuilder sb, SignatureTypeVariable type)
{
sb.Append("!");
sb.Append(type.Index.ToStringInvariant());
}
public override void AppendName(StringBuilder sb, SignatureMethodVariable type)
{
sb.Append("!!");
sb.Append(type.Index.ToStringInvariant());
}
public override void AppendName(StringBuilder sb, FunctionPointerType type)
{
MethodSignature signature = type.Signature;
AppendName(sb, signature.ReturnType);
sb.Append(" (");
for (int i = 0; i < signature.Length; i++)
{
if (i > 0)
sb.Append(", ");
AppendName(sb, signature[i]);
}
// TODO: Append '...' for vararg methods
sb.Append(')');
}
public override void AppendName(StringBuilder sb, ByRefType type)
{
AppendName(sb, type.ParameterType);
sb.Append(" ByRef");
}
public override void AppendName(StringBuilder sb, ArrayType type)
{
AppendName(sb, type.ElementType);
sb.Append('[');
if (type.Rank == 1 && type.IsMdArray)
sb.Append('*');
sb.Append(',', type.Rank - 1);
sb.Append(']');
}
protected override void AppendNameForInstantiatedType(StringBuilder sb, DefType type)
{
AppendName(sb, type.GetTypeDefinition());
sb.Append('<');
for (int i = 0; i < type.Instantiation.Length; i++)
{
if (i > 0)
sb.Append(", ");
AppendName(sb, type.Instantiation[i]);
}
sb.Append('>');
}
protected override void AppendNameForNamespaceType(StringBuilder sb, DefType type)
{
string ns = GetTypeNamespace(type);
if (ns.Length > 0)
{
AppendEscapedIdentifier(sb, ns);
sb.Append('.');
}
AppendEscapedIdentifier(sb, GetTypeName(type));
if (type is MetadataType)
{
IAssemblyDesc homeAssembly = ((MetadataType)type).Module as IAssemblyDesc;
AppendAssemblyName(sb, homeAssembly);
}
}
private void AppendAssemblyName(StringBuilder sb, IAssemblyDesc assembly)
{
if (assembly == null)
return;
sb.Append(',');
AppendEscapedIdentifier(sb, assembly.GetName().Name);
}
protected override void AppendNameForNestedType(StringBuilder sb, DefType nestedType, DefType containingType)
{
AppendName(sb, containingType);
sb.Append('+');
string ns = GetTypeNamespace(nestedType);
if (ns.Length > 0)
{
AppendEscapedIdentifier(sb, ns);
sb.Append('.');
}
AppendEscapedIdentifier(sb, GetTypeName(nestedType));
}
private string GetTypeName(DefType type)
{
return type.Name;
}
private string GetTypeNamespace(DefType type)
{
return type.Namespace;
}
private static char[] s_escapedChars = new char[] { ',', '=', '"', ']', '[', '*', '&', '+', '\\' };
private void AppendEscapedIdentifier(StringBuilder sb, string identifier)
{
if (identifier.IndexOfAny(s_escapedChars) >= 0)
{
string escapedIdentifier = identifier;
foreach (char escapedChar in s_escapedChars)
{
string escapedCharString = new string(escapedChar, 1);
escapedIdentifier = escapedIdentifier.Replace(escapedCharString, "\\" + escapedCharString);
}
sb.Append(escapedIdentifier);
}
else
{
sb.Append(identifier);
}
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Text;
using Internal.TypeSystem;
namespace ILCompiler.DependencyAnalysis
{
public struct CallingConventionConverterKey : IEquatable<CallingConventionConverterKey>
{
public CallingConventionConverterKey(Internal.NativeFormat.CallingConventionConverterKind converterKind,
MethodSignature signature)
{
ConverterKind = converterKind;
Signature = signature;
}
public Internal.NativeFormat.CallingConventionConverterKind ConverterKind { get; }
public MethodSignature Signature { get; }
public override bool Equals(object obj)
{
return obj is CallingConventionConverterKey && Equals((CallingConventionConverterKey)obj);
}
public bool Equals(CallingConventionConverterKey other)
{
if (ConverterKind != other.ConverterKind)
return false;
if (!Signature.Equals(other.Signature))
return false;
return true;
}
public override int GetHashCode()
{
return Signature.GetHashCode() ^ (int)ConverterKind;
}
public string GetName()
{
return ConverterKind.ToString() + Signature.GetName();
}
}
public static class MethodSignatureExtensions
{
public static void AppendName(this MethodSignature signature, StringBuilder nameBuilder, UniqueTypeNameFormatter typeNameFormatter)
{
if (signature.GenericParameterCount > 0)
{
nameBuilder.Append("GenParams:");
nameBuilder.Append(signature.GenericParameterCount);
nameBuilder.Append(' ');
}
if (signature.IsStatic)
nameBuilder.Append("Static ");
typeNameFormatter.AppendName(nameBuilder, signature.ReturnType);
nameBuilder.Append('(');
for (int i = 0; i < signature.Length; i++)
{
if (i != 0)
nameBuilder.Append(',');
typeNameFormatter.AppendName(nameBuilder, signature[i]);
}
nameBuilder.Append(')');
}
public static string GetName(this MethodSignature signature)
{
StringBuilder nameBuilder = new StringBuilder();
signature.AppendName(nameBuilder, UniqueTypeNameFormatter.Instance);
return nameBuilder.ToString();
}
}
public class UniqueTypeNameFormatter : TypeNameFormatter
{
public static UniqueTypeNameFormatter Instance { get; } = new UniqueTypeNameFormatter();
public override void AppendName(StringBuilder sb, PointerType type)
{
AppendName(sb, type.ParameterType);
sb.Append('*');
}
public override void AppendName(StringBuilder sb, GenericParameterDesc type)
{
string prefix = type.Kind == GenericParameterKind.Type ? "!" : "!!";
sb.Append(prefix);
sb.Append(type.Name);
}
public override void AppendName(StringBuilder sb, SignatureTypeVariable type)
{
sb.Append("!");
sb.Append(type.Index.ToStringInvariant());
}
public override void AppendName(StringBuilder sb, SignatureMethodVariable type)
{
sb.Append("!!");
sb.Append(type.Index.ToStringInvariant());
}
public override void AppendName(StringBuilder sb, FunctionPointerType type)
{
MethodSignature signature = type.Signature;
AppendName(sb, signature.ReturnType);
sb.Append(" (");
for (int i = 0; i < signature.Length; i++)
{
if (i > 0)
sb.Append(", ");
AppendName(sb, signature[i]);
}
// TODO: Append '...' for vararg methods
sb.Append(')');
}
public override void AppendName(StringBuilder sb, ByRefType type)
{
AppendName(sb, type.ParameterType);
sb.Append(" ByRef");
}
public override void AppendName(StringBuilder sb, ArrayType type)
{
AppendName(sb, type.ElementType);
sb.Append('[');
if (type.Rank == 1 && type.IsMdArray)
sb.Append('*');
sb.Append(',', type.Rank - 1);
sb.Append(']');
}
protected override void AppendNameForInstantiatedType(StringBuilder sb, DefType type)
{
AppendName(sb, type.GetTypeDefinition());
sb.Append('<');
for (int i = 0; i < type.Instantiation.Length; i++)
{
if (i > 0)
sb.Append(", ");
AppendName(sb, type.Instantiation[i]);
}
sb.Append('>');
}
protected override void AppendNameForNamespaceType(StringBuilder sb, DefType type)
{
string ns = GetTypeNamespace(type);
if (ns.Length > 0)
{
AppendEscapedIdentifier(sb, ns);
sb.Append('.');
}
AppendEscapedIdentifier(sb, GetTypeName(type));
if (type is MetadataType)
{
IAssemblyDesc homeAssembly = ((MetadataType)type).Module as IAssemblyDesc;
AppendAssemblyName(sb, homeAssembly);
}
}
private void AppendAssemblyName(StringBuilder sb, IAssemblyDesc assembly)
{
if (assembly == null)
return;
sb.Append(',');
AppendEscapedIdentifier(sb, assembly.GetName().Name);
}
protected override void AppendNameForNestedType(StringBuilder sb, DefType nestedType, DefType containingType)
{
AppendName(sb, containingType);
sb.Append('+');
string ns = GetTypeNamespace(nestedType);
if (ns.Length > 0)
{
AppendEscapedIdentifier(sb, ns);
sb.Append('.');
}
AppendEscapedIdentifier(sb, GetTypeName(nestedType));
}
private string GetTypeName(DefType type)
{
return type.Name;
}
private string GetTypeNamespace(DefType type)
{
return type.Namespace;
}
private static char[] s_escapedChars = new char[] { ',', '=', '"', ']', '[', '*', '&', '+', '\\' };
private void AppendEscapedIdentifier(StringBuilder sb, string identifier)
{
if (identifier.IndexOfAny(s_escapedChars) >= 0)
{
string escapedIdentifier = identifier;
foreach (char escapedChar in s_escapedChars)
{
string escapedCharString = new string(escapedChar, 1);
escapedIdentifier = escapedIdentifier.Replace(escapedCharString, "\\" + escapedCharString);
}
sb.Append(escapedIdentifier);
}
else
{
sb.Append(identifier);
}
}
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/mono/mono/tests/roslyn-bug-19038.cs
|
using System;
using System.Reflection;
unsafe public class C {
public int Value;
static void Main()
{
C a = new C { Value = 12 };
FieldInfo info = typeof(C).GetField("Value");
TypedReference reference = __makeref(a);
if (!(reference is TypedReference reference0))
throw new Exception("TypedReference");
info.SetValueDirect(reference0, 34);
Console.WriteLine($"a.Value = {a.Value}");
if (a.Value != 34)
throw new Exception("SetValueDirect");
int z = 56;
if (CopyRefInt(ref z) != 56)
throw new Exception("ref z");
Console.WriteLine("ok");
}
static int CopyRefInt(ref int z)
{
if (!(z is int z0))
throw new Exception("CopyRefInt");
return z0;
}
}
|
using System;
using System.Reflection;
unsafe public class C {
public int Value;
static void Main()
{
C a = new C { Value = 12 };
FieldInfo info = typeof(C).GetField("Value");
TypedReference reference = __makeref(a);
if (!(reference is TypedReference reference0))
throw new Exception("TypedReference");
info.SetValueDirect(reference0, 34);
Console.WriteLine($"a.Value = {a.Value}");
if (a.Value != 34)
throw new Exception("SetValueDirect");
int z = 56;
if (CopyRefInt(ref z) != 56)
throw new Exception("ref z");
Console.WriteLine("ok");
}
static int CopyRefInt(ref int z)
{
if (!(z is int z0))
throw new Exception("CopyRefInt");
return z0;
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all such usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1439/Generated1439.il
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated1439 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public G3_C1911`1<T0>
extends class G2_C831`2<class BaseClass1,!T0>
implements class IBase2`2<!T0,class BaseClass0>
{
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G3_C1911::Method7.18670<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod5215() cil managed noinlining {
ldstr "G3_C1911::ClassMethod5215.18671()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod5216() cil managed noinlining {
ldstr "G3_C1911::ClassMethod5216.18672()"
ret
}
.method public hidebysig newslot virtual instance string 'G2_C831<class BaseClass1,T0>.ClassMethod3042'() cil managed noinlining {
.override method instance string class G2_C831`2<class BaseClass1,!T0>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G2_C831`2<class BaseClass1,!T0>::.ctor()
ret
}
}
.class public G2_C831`2<T0, T1>
extends class G1_C15`2<class BaseClass1,class BaseClass1>
implements class IBase2`2<class BaseClass1,class BaseClass0>, class IBase1`1<class BaseClass1>
{
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G2_C831::Method7.12642<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method4() cil managed noinlining {
ldstr "G2_C831::Method4.12643()"
ret
}
.method public hidebysig newslot virtual instance string Method5() cil managed noinlining {
ldstr "G2_C831::Method5.12644()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ret
}
.method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining {
ldstr "G2_C831::Method6.12646<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "G2_C831::Method6.MI.12647<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3041() cil managed noinlining {
ldstr "G2_C831::ClassMethod3041.12648()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3042() cil managed noinlining {
ldstr "G2_C831::ClassMethod3042.12649()"
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G1_C15`2<class BaseClass1,class BaseClass1>::.ctor()
ret
}
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class public abstract G1_C15`2<T0, T1>
implements class IBase2`2<!T1,!T1>, class IBase1`1<class BaseClass0>
{
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G1_C15::Method7.4885<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<T1,T1>.Method7'<M0>() cil managed noinlining {
.override method instance string class IBase2`2<!T1,!T1>::Method7<[1]>()
ldstr "G1_C15::Method7.MI.4886<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method4() cil managed noinlining {
ldstr "G1_C15::Method4.4887()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G1_C15::Method4.MI.4888()"
ret
}
.method public hidebysig virtual instance string Method5() cil managed noinlining {
ldstr "G1_C15::Method5.4889()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G1_C15::Method5.MI.4890()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "G1_C15::Method6.4891<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>()
ldstr "G1_C15::Method6.MI.4892<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated1439 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1911.T<T0,(class G3_C1911`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 13
.locals init (string[] actualResults)
ldc.i4.s 8
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1911.T<T0,(class G3_C1911`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 8
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod5215()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod5216()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1911.A<(class G3_C1911`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 13
.locals init (string[] actualResults)
ldc.i4.s 8
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1911.A<(class G3_C1911`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 8
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5215()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5216()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1911.B<(class G3_C1911`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 13
.locals init (string[] actualResults)
ldc.i4.s 8
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1911.B<(class G3_C1911`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 8
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5215()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5216()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.T.T<T0,T1,(class G2_C831`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.T.T<T0,T1,(class G2_C831`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.A.T<T1,(class G2_C831`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.A.T<T1,(class G2_C831`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.A.A<(class G2_C831`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.A.A<(class G2_C831`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.A.B<(class G2_C831`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.A.B<(class G2_C831`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.B.T<T1,(class G2_C831`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.B.T<T1,(class G2_C831`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.B.A<(class G2_C831`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.B.A<(class G2_C831`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.B.B<(class G2_C831`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.B.B<(class G2_C831`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1911`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5216()
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5215()
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
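      // Repeat the same dispatch checks on a G3_C1911`1<class BaseClass1> instance.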
newobj instance void class G3_C1911`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5216()
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5215()
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
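      // Same checks against a plain G2_C831`2<class BaseClass0,class BaseClass0> instance;
      // the expected strings now resolve to G2_C831's own Method7/ClassMethod3042 implementations.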
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
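      // G2_C831`2<class BaseClass0,class BaseClass1> instance.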
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
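      // G2_C831`2<class BaseClass1,class BaseClass0> instance.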
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
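      // G2_C831`2<class BaseClass1,class BaseClass1> instance.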
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
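  // ConstrainedCallsTest drives the same set of types through the Generated1439::M.* generic
  // helper methods; each call passes the expected results as a single '#'-separated string.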
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1911`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
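      // G3_C1911`1<class BaseClass1> through the generic helpers.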
newobj instance void class G3_C1911`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
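      // G2_C831`2<class BaseClass0,class BaseClass0> through the generic helpers.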
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
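      // G2_C831`2<class BaseClass0,class BaseClass1> through the generic helpers.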
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
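       // Same sequence of checks for the G2_C831`2<class BaseClass1,class BaseClass0> instantiation.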
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
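       // Same sequence of checks for the G2_C831`2<class BaseClass1,class BaseClass1> instantiation.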
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
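  // This generated scenario defines no struct instantiations to exercise, so the
  // constrained interface call test below only prints its banners and returns.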
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
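  // CalliTest resolves each virtual slot with ldvirtftn and invokes it through calli,
  // comparing the returned string against the expected declaring method for each view of the object.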
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
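       // calli checks for G3_C1911`1<class BaseClass0>, taken through every base class and interface it implements.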
newobj instance void class G3_C1911`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod5216()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod5215()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
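       // calli checks for G3_C1911`1<class BaseClass1>.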
newobj instance void class G3_C1911`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod5216()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod5215()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
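       // calli checks for G2_C831`2<class BaseClass0,class BaseClass0>.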
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
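       // calli checks for G2_C831`2<class BaseClass0,class BaseClass1>.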
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
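       // calli checks for G2_C831`2<class BaseClass1,class BaseClass0>.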
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
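// Main runs the four test phases in order -- ordinary callvirt dispatch,
// constrained calls, struct-constrained interface calls, and calli through
// ldvirtftn -- and returns 100 on success.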
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated1439::MethodCallingTest()
call void Generated1439::ConstrainedCallsTest()
call void Generated1439::StructConstrainedInterfaceCallsTest()
call void Generated1439::CalliTest()
ldc.i4 100
ret
}
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated1439 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
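// This assembly defines a small generic type hierarchy -- BaseClass0/BaseClass1,
// the abstract generic base G1_C15`2, the derived generic class G2_C831`2, and
// G3_C1911`1 -- together with the interfaces IBase1`1 and IBase2`2, and then
// exercises virtual and interface dispatch over the instantiations used below.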
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public G3_C1911`1<T0>
extends class G2_C831`2<class BaseClass1,!T0>
implements class IBase2`2<!T0,class BaseClass0>
{
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G3_C1911::Method7.18670<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod5215() cil managed noinlining {
ldstr "G3_C1911::ClassMethod5215.18671()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod5216() cil managed noinlining {
ldstr "G3_C1911::ClassMethod5216.18672()"
ret
}
.method public hidebysig newslot virtual instance string 'G2_C831<class BaseClass1,T0>.ClassMethod3042'() cil managed noinlining {
.override method instance string class G2_C831`2<class BaseClass1,!T0>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G2_C831`2<class BaseClass1,!T0>::.ctor()
ret
}
}
.class public G2_C831`2<T0, T1>
extends class G1_C15`2<class BaseClass1,class BaseClass1>
implements class IBase2`2<class BaseClass1,class BaseClass0>, class IBase1`1<class BaseClass1>
{
.method public hidebysig virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G2_C831::Method7.12642<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method4() cil managed noinlining {
ldstr "G2_C831::Method4.12643()"
ret
}
.method public hidebysig newslot virtual instance string Method5() cil managed noinlining {
ldstr "G2_C831::Method5.12644()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ret
}
.method public hidebysig newslot virtual instance string Method6<M0>() cil managed noinlining {
ldstr "G2_C831::Method6.12646<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass1>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass1>::Method6<[1]>()
ldstr "G2_C831::Method6.MI.12647<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3041() cil managed noinlining {
ldstr "G2_C831::ClassMethod3041.12648()"
ret
}
.method public hidebysig newslot virtual instance string ClassMethod3042() cil managed noinlining {
ldstr "G2_C831::ClassMethod3042.12649()"
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void class G1_C15`2<class BaseClass1,class BaseClass1>::.ctor()
ret
}
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class public abstract G1_C15`2<T0, T1>
implements class IBase2`2<!T1,!T1>, class IBase1`1<class BaseClass0>
{
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "G1_C15::Method7.4885<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<T1,T1>.Method7'<M0>() cil managed noinlining {
.override method instance string class IBase2`2<!T1,!T1>::Method7<[1]>()
ldstr "G1_C15::Method7.MI.4886<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig virtual instance string Method4() cil managed noinlining {
ldstr "G1_C15::Method4.4887()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method4'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G1_C15::Method4.MI.4888()"
ret
}
.method public hidebysig virtual instance string Method5() cil managed noinlining {
ldstr "G1_C15::Method5.4889()"
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method5'() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G1_C15::Method5.MI.4890()"
ret
}
.method public hidebysig virtual instance string Method6<M0>() cil managed noinlining {
ldstr "G1_C15::Method6.4891<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase1<class BaseClass0>.Method6'<M0>() cil managed noinlining {
.override method instance string class IBase1`1<class BaseClass0>::Method6<[1]>()
ldstr "G1_C15::Method6.MI.4892<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class interface public abstract IBase1`1<+T0>
{
.method public hidebysig newslot abstract virtual instance string Method4() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method5() cil managed { }
.method public hidebysig newslot abstract virtual instance string Method6<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated1439 {
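// Each M.* helper below makes constrained. callvirt calls through one static type
// view (a concrete class, a generic instantiation, or an interface), collects the
// returned strings into an array, and passes them to TestFramework::MethodCallTest
// for comparison against the expected results supplied by the caller.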
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1911.T<T0,(class G3_C1911`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 13
.locals init (string[] actualResults)
ldc.i4.s 8
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1911.T<T0,(class G3_C1911`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 8
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod5215()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::ClassMethod5216()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<!!T0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1911.A<(class G3_C1911`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 13
.locals init (string[] actualResults)
ldc.i4.s 8
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1911.A<(class G3_C1911`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 8
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5215()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5216()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G3_C1911.B<(class G3_C1911`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 13
.locals init (string[] actualResults)
ldc.i4.s 8
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G3_C1911.B<(class G3_C1911`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 8
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5215()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5216()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 6
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 7
ldarga.s 0
constrained. !!W
callvirt instance string class G3_C1911`1<class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.T.T<T0,T1,(class G2_C831`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.T.T<T0,T1,(class G2_C831`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.A.T<T1,(class G2_C831`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.A.T<T1,(class G2_C831`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.A.A<(class G2_C831`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.A.A<(class G2_C831`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.A.B<(class G2_C831`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.A.B<(class G2_C831`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.B.T<T1,(class G2_C831`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.B.T<T1,(class G2_C831`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.B.A<(class G2_C831`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.B.A<(class G2_C831`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G2_C831.B.B<(class G2_C831`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 11
.locals init (string[] actualResults)
ldc.i4.s 6
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G2_C831.B.B<(class G2_C831`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 6
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 4
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 5
ldarga.s 0
constrained. !!W
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.T.T<T0,T1,(class G1_C15`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.A.T<T1,(class G1_C15`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.A.A<(class G1_C15`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.A.B<(class G1_C15`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.B.T<T1,(class G1_C15`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.B.A<(class G1_C15`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 9
.locals init (string[] actualResults)
ldc.i4.s 4
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.G1_C15.B.B<(class G1_C15`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 4
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 3
ldarga.s 0
constrained. !!W
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.T<T0,(class IBase1`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<!!T0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.A<(class IBase1`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 8
.locals init (string[] actualResults)
ldc.i4.s 3
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase1.B<(class IBase1`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 3
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
stelem.ref
ldloc.s actualResults
ldc.i4.s 2
ldarga.s 0
constrained. !!W
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
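// MethodCallingTest creates instances of the generated classes and verifies
// ordinary callvirt dispatch through each base-class and interface view,
// checking every returned string against the expected override.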
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
newobj instance void class G3_C1911`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5216()
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod5215()
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass0>
callvirt instance string class G3_C1911`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
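      // G3_C1911`1<class BaseClass1>: the same checks for the BaseClass1 instantiation.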
newobj instance void class G3_C1911`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5216()
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod5215()
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method7<object>()
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3042()
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G3_C1911`1<class BaseClass1>
callvirt instance string class G3_C1911`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
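      // G2_C831`2<class BaseClass0,class BaseClass0>: Method7 now resolves to G2_C831's own override.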
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
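      // G2_C831`2<class BaseClass0,class BaseClass1>.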
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
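      // G2_C831`2<class BaseClass1,class BaseClass0>.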
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
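      // G2_C831`2<class BaseClass1,class BaseClass1>.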
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
callvirt instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass0>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
callvirt instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method4()
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method5()
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
callvirt instance string class IBase1`1<class BaseClass1>::Method6<object>()
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc.0
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
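  // ConstrainedCallsTest: pass each instance to the generated Generated1439::M.* generic helpers
  // together with a '#'-separated string of the results expected from the constrained calls.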
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
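      // Constrained calls on G3_C1911`1<class BaseClass0>, viewed as each base class and interface.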
newobj instance void class G3_C1911`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.T<class BaseClass0,class G3_C1911`1<class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.A<class G3_C1911`1<class BaseClass0>>(!!0,string)
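      // Constrained calls on G3_C1911`1<class BaseClass1>.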
newobj instance void class G3_C1911`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G2_C831.B.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G3_C1911`1<class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.T<class BaseClass1,class G3_C1911`1<class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G3_C1911::ClassMethod3042.MI.18673()#G3_C1911::ClassMethod5215.18671()#G3_C1911::ClassMethod5216.18672()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G3_C1911::Method7.18670<System.Object>()#"
call void Generated1439::M.G3_C1911.B<class G3_C1911`1<class BaseClass1>>(!!0,string)
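      // Constrained calls on G2_C831`2<class BaseClass0,class BaseClass0>.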
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass0,class BaseClass0>>(!!0,string)
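      // Same Generated1439::M.* checks for an instance of G2_C831`2<class BaseClass0,class BaseClass1>.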
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.A.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass0,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass0,class BaseClass1>>(!!0,string)
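      // Same Generated1439::M.* checks for an instance of G2_C831`2<class BaseClass1,class BaseClass0>.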
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass0>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass1,class BaseClass0>>(!!0,string)
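      // Same Generated1439::M.* checks for an instance of G2_C831`2<class BaseClass1,class BaseClass1>.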
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G1_C15::Method5.4889()#G1_C15::Method6.4891<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G1_C15.B.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.A<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.T.T<class BaseClass1,class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::ClassMethod3041.12648()#G2_C831::ClassMethod3042.12649()#G2_C831::Method4.12643()#G2_C831::Method5.12644()#G2_C831::Method6.12646<System.Object>()#G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.G2_C831.B.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass1,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.B.A<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.T<class BaseClass1,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method4.12643()#G2_C831::Method5.MI.12645()#G2_C831::Method6.MI.12647<System.Object>()#"
call void Generated1439::M.IBase1.B<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.T.T<class BaseClass0,class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!2,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.T<class BaseClass0,class G2_C831`2<class BaseClass1,class BaseClass1>>(!!1,string)
ldloc.0
ldstr "G2_C831::Method7.12642<System.Object>()#"
call void Generated1439::M.IBase2.A.A<class G2_C831`2<class BaseClass1,class BaseClass1>>(!!0,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
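  // StructConstrainedInterfaceCallsTest: no constrained interface calls are emitted for these generated
  // types, so this method only prints its banner lines and returns.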
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
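  // CalliTest: for each generated type, loads the virtual slot with ldvirtftn, invokes it through calli,
  // and compares the returned string with the expected override via TestFramework::MethodCallTest.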
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
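      // calli dispatch checks against an instance of G3_C1911`1<class BaseClass0>.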
newobj instance void class G3_C1911`1<class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod5216()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod5215()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass0>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass0> on type class G3_C1911`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
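      // calli dispatch checks against an instance of G3_C1911`1<class BaseClass1>.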
newobj instance void class G3_C1911`1<class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod5216()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod5216.18672()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod5215()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod5215.18671()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method7<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::Method7.18670<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod3042()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G3_C1911::ClassMethod3042.MI.18673()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::ClassMethod3041()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method6<object>()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method5()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G3_C1911`1<class BaseClass1>
ldloc.0
ldvirtftn instance string class G3_C1911`1<class BaseClass1>::Method4()
calli default string(class G3_C1911`1<class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G3_C1911`1<class BaseClass1> on type class G3_C1911`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
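      // calli dispatch checks against an instance of G2_C831`2<class BaseClass0,class BaseClass0>.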
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
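      // calli dispatch checks against an instance of G2_C831`2<class BaseClass0,class BaseClass1>.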
newobj instance void class G2_C831`2<class BaseClass0,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass0,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass0,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass0,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass0>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass0>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass0>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
newobj instance void class G2_C831`2<class BaseClass1,class BaseClass1>::.ctor()
stloc.0
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C15::Method6.4891<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G1_C15::Method5.4889()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G1_C15`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G1_C15`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G1_C15`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass0>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3042()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::ClassMethod3042.12649()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::ClassMethod3041()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::ClassMethod3041.12648()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method6.12646<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method5.12644()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
castclass class G2_C831`2<class BaseClass1,class BaseClass1>
ldloc.0
ldvirtftn instance string class G2_C831`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class G2_C831`2<class BaseClass1,class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method4()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method4.12643()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method5()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method5.MI.12645()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase1`1<class BaseClass1>::Method6<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method6.MI.12647<System.Object>()"
ldstr "class IBase1`1<class BaseClass1> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc.0
ldloc.0
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(class G2_C831`2<class BaseClass1,class BaseClass1>)
ldstr "G2_C831::Method7.12642<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type class G2_C831`2<class BaseClass1,class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated1439::MethodCallingTest()
call void Generated1439::ConstrainedCallsTest()
call void Generated1439::StructConstrainedInterfaceCallsTest()
call void Generated1439::CalliTest()
ldc.i4 100
ret
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, and that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
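
A minimal illustrative sketch of the pattern described above, assuming a simplified `Compiler` type. This is not the actual JIT source; only the `featureSIMD` and `supportSIMDTypes()` names come from the description, while the `TARGET_ARM64` guard placement and the `main` driver are invented for illustration.

```cpp
// Hypothetical sketch, not dotnet/runtime code: ABI-related queries should go
// through supportSIMDTypes(), which on Arm64 ignores the user-configurable flag.
#include <cstdio>

struct Compiler
{
    bool featureSIMD = true; // may be cleared via COMPlus_FeatureSIMD=0

    bool supportSIMDTypes() const
    {
#if defined(TARGET_ARM64)
        return true;        // Arm64 always needs SIMD types for ABI handling
#else
        return featureSIMD; // other targets may honor the switch
#endif
    }
};

int main()
{
    Compiler comp;
    comp.featureSIMD = false; // simulate COMPlus_FeatureSIMD=0
    // In an Arm64 build, supportSIMDTypes() stays true even with the flag off.
    std::printf("featureSIMD=%d supportSIMDTypes=%d\n",
                (int)comp.featureSIMD, (int)comp.supportSIMDTypes());
    return 0;
}
```

The point of the change is that callers deciding ABI behavior query `supportSIMDTypes()` rather than reading `featureSIMD` directly.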
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, and that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b11949/b11949.il
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly extern System.Console
{
.publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
.ver 4:0:0:0
}
.assembly b11949
{
}
.namespace A
{
.class public auto ansi X
{
.method public specialname rtspecialname instance void .ctor() il managed
{
// Code size 7 (0x7)
.maxstack 1
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
} // end of method 'X::.ctor'
} // end of class 'X'
//Global methods
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.method public static int32 main() il managed
{
.maxstack 1
.locals (class A.X px)
.entrypoint
ldnull
stloc.0
try_begin:
newobj instance void [mscorlib]System.Exception::.ctor()
throw
leave.s try_end
filter_begin:
pop
newobj instance void A.X::.ctor()
stloc.0
ldc.i4.1
endfilter
except_begin:
pop
leave.s try_end
try_end:
ldc.i4 100
ret
.try try_begin to filter_begin filter filter_begin handler except_begin to try_end
} // end of global method 'main'
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern legacy library mscorlib {}
.assembly extern System.Console
{
.publickeytoken = (B0 3F 5F 7F 11 D5 0A 3A )
.ver 4:0:0:0
}
.assembly b11949
{
}
.namespace A
{
.class public auto ansi X
{
.method public specialname rtspecialname instance void .ctor() il managed
{
// Code size 7 (0x7)
.maxstack 1
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
} // end of method 'X::.ctor'
} // end of class 'X'
//Global methods
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.method public static int32 main() il managed
{
.maxstack 1
.locals (class A.X px)
.entrypoint
ldnull
stloc.0
try_begin:
newobj instance void [mscorlib]System.Exception::.ctor()
throw
leave.s try_end
filter_begin:
pop
newobj instance void A.X::.ctor()
stloc.0
ldc.i4.1
endfilter
except_begin:
pop
leave.s try_end
try_end:
ldc.i4 100
ret
.try try_begin to filter_begin filter filter_begin handler except_begin to try_end
} // end of global method 'main'
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, and that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, and that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/HardwareIntrinsics/General/Vector64/GreaterThan.Int64.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void GreaterThanInt64()
{
var test = new VectorBinaryOpTest__GreaterThanInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__GreaterThanInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int64> _fld1;
public Vector64<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__GreaterThanInt64 testClass)
{
var result = Vector64.GreaterThan(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector64<Int64> _clsVar1;
private static Vector64<Int64> _clsVar2;
private Vector64<Int64> _fld1;
private Vector64<Int64> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__GreaterThanInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
}
public VectorBinaryOpTest__GreaterThanInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector64.GreaterThan(
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector64).GetMethod(nameof(Vector64.GreaterThan), new Type[] {
typeof(Vector64<Int64>),
typeof(Vector64<Int64>)
});
if (method is null)
{
method = typeof(Vector64).GetMethod(nameof(Vector64.GreaterThan), 1, new Type[] {
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int64));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector64.GreaterThan(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr);
var result = Vector64.GreaterThan(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__GreaterThanInt64();
var result = Vector64.GreaterThan(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector64.GreaterThan(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector64.GreaterThan(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<Int64> op1, Vector64<Int64> op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != ((left[0] > right[0]) ? -1 : 0))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != ((left[i] > right[i]) ? -1 : 0))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.GreaterThan)}<Int64>(Vector64<Int64>, Vector64<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void GreaterThanInt64()
{
var test = new VectorBinaryOpTest__GreaterThanInt64();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorBinaryOpTest__GreaterThanInt64
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int64[] inArray1, Int64[] inArray2, Int64[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int64>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int64>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int64>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int64, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int64, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<Int64> _fld1;
public Vector64<Int64> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref testStruct._fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
return testStruct;
}
public void RunStructFldScenario(VectorBinaryOpTest__GreaterThanInt64 testClass)
{
var result = Vector64.GreaterThan(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int64>>() / sizeof(Int64);
private static Int64[] _data1 = new Int64[Op1ElementCount];
private static Int64[] _data2 = new Int64[Op2ElementCount];
private static Vector64<Int64> _clsVar1;
private static Vector64<Int64> _clsVar2;
private Vector64<Int64> _fld1;
private Vector64<Int64> _fld2;
private DataTable _dataTable;
static VectorBinaryOpTest__GreaterThanInt64()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _clsVar2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
}
public VectorBinaryOpTest__GreaterThanInt64()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld1), ref Unsafe.As<Int64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int64>, byte>(ref _fld2), ref Unsafe.As<Int64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int64>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt64(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt64(); }
_dataTable = new DataTable(_data1, _data2, new Int64[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector64.GreaterThan(
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector64).GetMethod(nameof(Vector64.GreaterThan), new Type[] {
typeof(Vector64<Int64>),
typeof(Vector64<Int64>)
});
if (method is null)
{
method = typeof(Vector64).GetMethod(nameof(Vector64.GreaterThan), 1, new Type[] {
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0)),
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Int64));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int64>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector64.GreaterThan(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int64>>(_dataTable.inArray2Ptr);
var result = Vector64.GreaterThan(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorBinaryOpTest__GreaterThanInt64();
var result = Vector64.GreaterThan(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector64.GreaterThan(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector64.GreaterThan(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<Int64> op1, Vector64<Int64> op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int64[] inArray1 = new Int64[Op1ElementCount];
Int64[] inArray2 = new Int64[Op2ElementCount];
Int64[] outArray = new Int64[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int64>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int64>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int64[] left, Int64[] right, Int64[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != ((left[0] > right[0]) ? -1 : 0))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != ((left[i] > right[i]) ? -1 : 0))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.GreaterThan)}<Int64>(Vector64<Int64>, Vector64<Int64>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, and that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set, and that does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/GC/Performance/Tests/GCHandleTest.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
class GCHandleTest
{
// test variables
private Object[] m_objectArray;
private GCHandle[] m_gchArray;
private const int m_numGCHs = 50;
private long m_numIters;
static void Main(string[] real_args)
{
long iterations = 200;
GCHandleTest test = new GCHandleTest(iterations);
test.GetTargetTest();
test.SetTargetTest();
test.AllocTest(GCHandleType.Normal);
test.AllocTest(GCHandleType.Weak);
test.AllocTest(GCHandleType.WeakTrackResurrection);
test.AllocTest(GCHandleType.Pinned);
test.FreeTest();
}
public GCHandleTest(long numIters)
{
m_numIters = numIters;
m_objectArray = new Object[m_numGCHs];
m_gchArray = new GCHandle[m_numGCHs];
for (int i = 0; i < m_numGCHs; ++i)
{
// create a new string object
String s = "blah" + i;
m_objectArray[i] = s;
}
GC.Collect();
}
public void Init()
{
for (int i = 0; i < m_numGCHs; ++i)
{
m_gchArray[i] = GCHandle.Alloc(m_objectArray[i]);
}
//GC now to get that out of the way
GC.Collect();
}
public void AllocTest(GCHandleType gcht)
{
for (long i = 0; i < m_numIters; i++)
{
m_gchArray[0] = GCHandle.Alloc(m_objectArray[0],gcht);
m_gchArray[1] = GCHandle.Alloc(m_objectArray[1],gcht);
m_gchArray[2] = GCHandle.Alloc(m_objectArray[2],gcht);
m_gchArray[3] = GCHandle.Alloc(m_objectArray[3],gcht);
m_gchArray[4] = GCHandle.Alloc(m_objectArray[4],gcht);
m_gchArray[5] = GCHandle.Alloc(m_objectArray[5],gcht);
m_gchArray[6] = GCHandle.Alloc(m_objectArray[6],gcht);
m_gchArray[7] = GCHandle.Alloc(m_objectArray[7],gcht);
m_gchArray[8] = GCHandle.Alloc(m_objectArray[8],gcht);
m_gchArray[9] = GCHandle.Alloc(m_objectArray[9],gcht);
m_gchArray[10] = GCHandle.Alloc(m_objectArray[10],gcht);
m_gchArray[11] = GCHandle.Alloc(m_objectArray[11],gcht);
m_gchArray[12] = GCHandle.Alloc(m_objectArray[12],gcht);
m_gchArray[13] = GCHandle.Alloc(m_objectArray[13],gcht);
m_gchArray[14] = GCHandle.Alloc(m_objectArray[14],gcht);
m_gchArray[15] = GCHandle.Alloc(m_objectArray[15],gcht);
m_gchArray[16] = GCHandle.Alloc(m_objectArray[16],gcht);
m_gchArray[17] = GCHandle.Alloc(m_objectArray[17],gcht);
m_gchArray[18] = GCHandle.Alloc(m_objectArray[18],gcht);
m_gchArray[19] = GCHandle.Alloc(m_objectArray[19],gcht);
m_gchArray[20] = GCHandle.Alloc(m_objectArray[20],gcht);
m_gchArray[21] = GCHandle.Alloc(m_objectArray[21],gcht);
m_gchArray[22] = GCHandle.Alloc(m_objectArray[22],gcht);
m_gchArray[23] = GCHandle.Alloc(m_objectArray[23],gcht);
m_gchArray[24] = GCHandle.Alloc(m_objectArray[24],gcht);
m_gchArray[25] = GCHandle.Alloc(m_objectArray[25],gcht);
m_gchArray[26] = GCHandle.Alloc(m_objectArray[26],gcht);
m_gchArray[27] = GCHandle.Alloc(m_objectArray[27],gcht);
m_gchArray[28] = GCHandle.Alloc(m_objectArray[28],gcht);
m_gchArray[29] = GCHandle.Alloc(m_objectArray[29],gcht);
m_gchArray[30] = GCHandle.Alloc(m_objectArray[30],gcht);
m_gchArray[31] = GCHandle.Alloc(m_objectArray[31],gcht);
m_gchArray[32] = GCHandle.Alloc(m_objectArray[32],gcht);
m_gchArray[33] = GCHandle.Alloc(m_objectArray[33],gcht);
m_gchArray[34] = GCHandle.Alloc(m_objectArray[34],gcht);
m_gchArray[35] = GCHandle.Alloc(m_objectArray[35],gcht);
m_gchArray[36] = GCHandle.Alloc(m_objectArray[36],gcht);
m_gchArray[37] = GCHandle.Alloc(m_objectArray[37],gcht);
m_gchArray[38] = GCHandle.Alloc(m_objectArray[38],gcht);
m_gchArray[39] = GCHandle.Alloc(m_objectArray[39],gcht);
m_gchArray[40] = GCHandle.Alloc(m_objectArray[40],gcht);
m_gchArray[41] = GCHandle.Alloc(m_objectArray[41],gcht);
m_gchArray[42] = GCHandle.Alloc(m_objectArray[42],gcht);
m_gchArray[43] = GCHandle.Alloc(m_objectArray[43],gcht);
m_gchArray[44] = GCHandle.Alloc(m_objectArray[44],gcht);
m_gchArray[45] = GCHandle.Alloc(m_objectArray[45],gcht);
m_gchArray[46] = GCHandle.Alloc(m_objectArray[46],gcht);
m_gchArray[47] = GCHandle.Alloc(m_objectArray[47],gcht);
m_gchArray[48] = GCHandle.Alloc(m_objectArray[48],gcht);
m_gchArray[49] = GCHandle.Alloc(m_objectArray[49],gcht);
for (int j=0; j< m_gchArray.Length; j++)
{
m_gchArray[j].Free();
}
GC.Collect();
}
}
public void FreeTest()
{
for (long i = 0; i < m_numIters; i++)
{
GC.Collect();
for (int j=0; j< m_gchArray.Length; j++)
{
m_gchArray[j] = GCHandle.Alloc(m_objectArray[j]);
}
m_gchArray[0].Free();
m_gchArray[1].Free();
m_gchArray[2].Free();
m_gchArray[3].Free();
m_gchArray[4].Free();
m_gchArray[5].Free();
m_gchArray[6].Free();
m_gchArray[7].Free();
m_gchArray[8].Free();
m_gchArray[9].Free();
m_gchArray[10].Free();
m_gchArray[11].Free();
m_gchArray[12].Free();
m_gchArray[13].Free();
m_gchArray[14].Free();
m_gchArray[15].Free();
m_gchArray[16].Free();
m_gchArray[17].Free();
m_gchArray[18].Free();
m_gchArray[19].Free();
m_gchArray[20].Free();
m_gchArray[21].Free();
m_gchArray[22].Free();
m_gchArray[23].Free();
m_gchArray[24].Free();
m_gchArray[25].Free();
m_gchArray[26].Free();
m_gchArray[27].Free();
m_gchArray[28].Free();
m_gchArray[29].Free();
m_gchArray[30].Free();
m_gchArray[31].Free();
m_gchArray[32].Free();
m_gchArray[33].Free();
m_gchArray[34].Free();
m_gchArray[35].Free();
m_gchArray[36].Free();
m_gchArray[37].Free();
m_gchArray[38].Free();
m_gchArray[39].Free();
m_gchArray[40].Free();
m_gchArray[41].Free();
m_gchArray[42].Free();
m_gchArray[43].Free();
m_gchArray[44].Free();
m_gchArray[45].Free();
m_gchArray[46].Free();
m_gchArray[47].Free();
m_gchArray[48].Free();
m_gchArray[49].Free();
}
}
public void SetTargetTest()
{
Init();
for (long i = 0; i < m_numIters; i++)
{
m_gchArray[0].Target = m_objectArray[0];
m_gchArray[1].Target = m_objectArray[1];
m_gchArray[2].Target = m_objectArray[2];
m_gchArray[3].Target = m_objectArray[3];
m_gchArray[4].Target = m_objectArray[4];
m_gchArray[5].Target = m_objectArray[5];
m_gchArray[6].Target = m_objectArray[6];
m_gchArray[7].Target = m_objectArray[7];
m_gchArray[8].Target = m_objectArray[8];
m_gchArray[9].Target = m_objectArray[9];
m_gchArray[10].Target = m_objectArray[10];
m_gchArray[11].Target = m_objectArray[11];
m_gchArray[12].Target = m_objectArray[12];
m_gchArray[13].Target = m_objectArray[13];
m_gchArray[14].Target = m_objectArray[14];
m_gchArray[15].Target = m_objectArray[15];
m_gchArray[16].Target = m_objectArray[16];
m_gchArray[17].Target = m_objectArray[17];
m_gchArray[18].Target = m_objectArray[18];
m_gchArray[19].Target = m_objectArray[19];
m_gchArray[20].Target = m_objectArray[20];
m_gchArray[21].Target = m_objectArray[21];
m_gchArray[22].Target = m_objectArray[22];
m_gchArray[23].Target = m_objectArray[23];
m_gchArray[24].Target = m_objectArray[24];
m_gchArray[25].Target = m_objectArray[25];
m_gchArray[26].Target = m_objectArray[26];
m_gchArray[27].Target = m_objectArray[27];
m_gchArray[28].Target = m_objectArray[28];
m_gchArray[29].Target = m_objectArray[29];
m_gchArray[30].Target = m_objectArray[30];
m_gchArray[31].Target = m_objectArray[31];
m_gchArray[32].Target = m_objectArray[32];
m_gchArray[33].Target = m_objectArray[33];
m_gchArray[34].Target = m_objectArray[34];
m_gchArray[35].Target = m_objectArray[35];
m_gchArray[36].Target = m_objectArray[36];
m_gchArray[37].Target = m_objectArray[37];
m_gchArray[38].Target = m_objectArray[38];
m_gchArray[39].Target = m_objectArray[39];
m_gchArray[40].Target = m_objectArray[40];
m_gchArray[41].Target = m_objectArray[41];
m_gchArray[42].Target = m_objectArray[42];
m_gchArray[43].Target = m_objectArray[43];
m_gchArray[44].Target = m_objectArray[44];
m_gchArray[45].Target = m_objectArray[45];
m_gchArray[46].Target = m_objectArray[46];
m_gchArray[47].Target = m_objectArray[47];
m_gchArray[48].Target = m_objectArray[48];
m_gchArray[49].Target = m_objectArray[49];
}
}
public void GetTargetTest()
{
Init();
Object o = null;
for (long i = 0; i < m_numIters; i++)
{
o = m_gchArray[0].Target;
o = m_gchArray[1].Target;
o = m_gchArray[2].Target;
o = m_gchArray[3].Target;
o = m_gchArray[4].Target;
o = m_gchArray[5].Target;
o = m_gchArray[6].Target;
o = m_gchArray[7].Target;
o = m_gchArray[8].Target;
o = m_gchArray[9].Target;
o = m_gchArray[10].Target;
o = m_gchArray[11].Target;
o = m_gchArray[12].Target;
o = m_gchArray[13].Target;
o = m_gchArray[14].Target;
o = m_gchArray[15].Target;
o = m_gchArray[16].Target;
o = m_gchArray[17].Target;
o = m_gchArray[18].Target;
o = m_gchArray[19].Target;
o = m_gchArray[20].Target;
o = m_gchArray[21].Target;
o = m_gchArray[22].Target;
o = m_gchArray[23].Target;
o = m_gchArray[24].Target;
o = m_gchArray[25].Target;
o = m_gchArray[26].Target;
o = m_gchArray[27].Target;
o = m_gchArray[28].Target;
o = m_gchArray[29].Target;
o = m_gchArray[30].Target;
o = m_gchArray[31].Target;
o = m_gchArray[32].Target;
o = m_gchArray[33].Target;
o = m_gchArray[34].Target;
o = m_gchArray[35].Target;
o = m_gchArray[36].Target;
o = m_gchArray[37].Target;
o = m_gchArray[38].Target;
o = m_gchArray[39].Target;
o = m_gchArray[40].Target;
o = m_gchArray[41].Target;
o = m_gchArray[42].Target;
o = m_gchArray[43].Target;
o = m_gchArray[44].Target;
o = m_gchArray[45].Target;
o = m_gchArray[46].Target;
o = m_gchArray[47].Target;
o = m_gchArray[48].Target;
o = m_gchArray[49].Target;
}
GC.KeepAlive(o);
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Runtime.InteropServices;
class GCHandleTest
{
// test variables
private Object[] m_objectArray;
private GCHandle[] m_gchArray;
private const int m_numGCHs = 50;
private long m_numIters;
static void Main(string[] real_args)
{
long iterations = 200;
GCHandleTest test = new GCHandleTest(iterations);
test.GetTargetTest();
test.SetTargetTest();
test.AllocTest(GCHandleType.Normal);
test.AllocTest(GCHandleType.Weak);
test.AllocTest(GCHandleType.WeakTrackResurrection);
test.AllocTest(GCHandleType.Pinned);
test.FreeTest();
}
public GCHandleTest(long numIters)
{
m_numIters = numIters;
m_objectArray = new Object[m_numGCHs];
m_gchArray = new GCHandle[m_numGCHs];
for (int i = 0; i < m_numGCHs; ++i)
{
// create a new string object
String s = "blah" + i;
m_objectArray[i] = s;
}
GC.Collect();
}
public void Init()
{
for (int i = 0; i < m_numGCHs; ++i)
{
m_gchArray[i] = GCHandle.Alloc(m_objectArray[i]);
}
//GC now to get that out of the way
GC.Collect();
}
public void AllocTest(GCHandleType gcht)
{
for (long i = 0; i < m_numIters; i++)
{
m_gchArray[0] = GCHandle.Alloc(m_objectArray[0],gcht);
m_gchArray[1] = GCHandle.Alloc(m_objectArray[1],gcht);
m_gchArray[2] = GCHandle.Alloc(m_objectArray[2],gcht);
m_gchArray[3] = GCHandle.Alloc(m_objectArray[3],gcht);
m_gchArray[4] = GCHandle.Alloc(m_objectArray[4],gcht);
m_gchArray[5] = GCHandle.Alloc(m_objectArray[5],gcht);
m_gchArray[6] = GCHandle.Alloc(m_objectArray[6],gcht);
m_gchArray[7] = GCHandle.Alloc(m_objectArray[7],gcht);
m_gchArray[8] = GCHandle.Alloc(m_objectArray[8],gcht);
m_gchArray[9] = GCHandle.Alloc(m_objectArray[9],gcht);
m_gchArray[10] = GCHandle.Alloc(m_objectArray[10],gcht);
m_gchArray[11] = GCHandle.Alloc(m_objectArray[11],gcht);
m_gchArray[12] = GCHandle.Alloc(m_objectArray[12],gcht);
m_gchArray[13] = GCHandle.Alloc(m_objectArray[13],gcht);
m_gchArray[14] = GCHandle.Alloc(m_objectArray[14],gcht);
m_gchArray[15] = GCHandle.Alloc(m_objectArray[15],gcht);
m_gchArray[16] = GCHandle.Alloc(m_objectArray[16],gcht);
m_gchArray[17] = GCHandle.Alloc(m_objectArray[17],gcht);
m_gchArray[18] = GCHandle.Alloc(m_objectArray[18],gcht);
m_gchArray[19] = GCHandle.Alloc(m_objectArray[19],gcht);
m_gchArray[20] = GCHandle.Alloc(m_objectArray[20],gcht);
m_gchArray[21] = GCHandle.Alloc(m_objectArray[21],gcht);
m_gchArray[22] = GCHandle.Alloc(m_objectArray[22],gcht);
m_gchArray[23] = GCHandle.Alloc(m_objectArray[23],gcht);
m_gchArray[24] = GCHandle.Alloc(m_objectArray[24],gcht);
m_gchArray[25] = GCHandle.Alloc(m_objectArray[25],gcht);
m_gchArray[26] = GCHandle.Alloc(m_objectArray[26],gcht);
m_gchArray[27] = GCHandle.Alloc(m_objectArray[27],gcht);
m_gchArray[28] = GCHandle.Alloc(m_objectArray[28],gcht);
m_gchArray[29] = GCHandle.Alloc(m_objectArray[29],gcht);
m_gchArray[30] = GCHandle.Alloc(m_objectArray[30],gcht);
m_gchArray[31] = GCHandle.Alloc(m_objectArray[31],gcht);
m_gchArray[32] = GCHandle.Alloc(m_objectArray[32],gcht);
m_gchArray[33] = GCHandle.Alloc(m_objectArray[33],gcht);
m_gchArray[34] = GCHandle.Alloc(m_objectArray[34],gcht);
m_gchArray[35] = GCHandle.Alloc(m_objectArray[35],gcht);
m_gchArray[36] = GCHandle.Alloc(m_objectArray[36],gcht);
m_gchArray[37] = GCHandle.Alloc(m_objectArray[37],gcht);
m_gchArray[38] = GCHandle.Alloc(m_objectArray[38],gcht);
m_gchArray[39] = GCHandle.Alloc(m_objectArray[39],gcht);
m_gchArray[40] = GCHandle.Alloc(m_objectArray[40],gcht);
m_gchArray[41] = GCHandle.Alloc(m_objectArray[41],gcht);
m_gchArray[42] = GCHandle.Alloc(m_objectArray[42],gcht);
m_gchArray[43] = GCHandle.Alloc(m_objectArray[43],gcht);
m_gchArray[44] = GCHandle.Alloc(m_objectArray[44],gcht);
m_gchArray[45] = GCHandle.Alloc(m_objectArray[45],gcht);
m_gchArray[46] = GCHandle.Alloc(m_objectArray[46],gcht);
m_gchArray[47] = GCHandle.Alloc(m_objectArray[47],gcht);
m_gchArray[48] = GCHandle.Alloc(m_objectArray[48],gcht);
m_gchArray[49] = GCHandle.Alloc(m_objectArray[49],gcht);
for (int j=0; j< m_gchArray.Length; j++)
{
m_gchArray[j].Free();
}
GC.Collect();
}
}
public void FreeTest()
{
for (long i = 0; i < m_numIters; i++)
{
GC.Collect();
for (int j=0; j< m_gchArray.Length; j++)
{
m_gchArray[j] = GCHandle.Alloc(m_objectArray[j]);
}
m_gchArray[0].Free();
m_gchArray[1].Free();
m_gchArray[2].Free();
m_gchArray[3].Free();
m_gchArray[4].Free();
m_gchArray[5].Free();
m_gchArray[6].Free();
m_gchArray[7].Free();
m_gchArray[8].Free();
m_gchArray[9].Free();
m_gchArray[10].Free();
m_gchArray[11].Free();
m_gchArray[12].Free();
m_gchArray[13].Free();
m_gchArray[14].Free();
m_gchArray[15].Free();
m_gchArray[16].Free();
m_gchArray[17].Free();
m_gchArray[18].Free();
m_gchArray[19].Free();
m_gchArray[20].Free();
m_gchArray[21].Free();
m_gchArray[22].Free();
m_gchArray[23].Free();
m_gchArray[24].Free();
m_gchArray[25].Free();
m_gchArray[26].Free();
m_gchArray[27].Free();
m_gchArray[28].Free();
m_gchArray[29].Free();
m_gchArray[30].Free();
m_gchArray[31].Free();
m_gchArray[32].Free();
m_gchArray[33].Free();
m_gchArray[34].Free();
m_gchArray[35].Free();
m_gchArray[36].Free();
m_gchArray[37].Free();
m_gchArray[38].Free();
m_gchArray[39].Free();
m_gchArray[40].Free();
m_gchArray[41].Free();
m_gchArray[42].Free();
m_gchArray[43].Free();
m_gchArray[44].Free();
m_gchArray[45].Free();
m_gchArray[46].Free();
m_gchArray[47].Free();
m_gchArray[48].Free();
m_gchArray[49].Free();
}
}
public void SetTargetTest()
{
Init();
for (long i = 0; i < m_numIters; i++)
{
m_gchArray[0].Target = m_objectArray[0];
m_gchArray[1].Target = m_objectArray[1];
m_gchArray[2].Target = m_objectArray[2];
m_gchArray[3].Target = m_objectArray[3];
m_gchArray[4].Target = m_objectArray[4];
m_gchArray[5].Target = m_objectArray[5];
m_gchArray[6].Target = m_objectArray[6];
m_gchArray[7].Target = m_objectArray[7];
m_gchArray[8].Target = m_objectArray[8];
m_gchArray[9].Target = m_objectArray[9];
m_gchArray[10].Target = m_objectArray[10];
m_gchArray[11].Target = m_objectArray[11];
m_gchArray[12].Target = m_objectArray[12];
m_gchArray[13].Target = m_objectArray[13];
m_gchArray[14].Target = m_objectArray[14];
m_gchArray[15].Target = m_objectArray[15];
m_gchArray[16].Target = m_objectArray[16];
m_gchArray[17].Target = m_objectArray[17];
m_gchArray[18].Target = m_objectArray[18];
m_gchArray[19].Target = m_objectArray[19];
m_gchArray[20].Target = m_objectArray[20];
m_gchArray[21].Target = m_objectArray[21];
m_gchArray[22].Target = m_objectArray[22];
m_gchArray[23].Target = m_objectArray[23];
m_gchArray[24].Target = m_objectArray[24];
m_gchArray[25].Target = m_objectArray[25];
m_gchArray[26].Target = m_objectArray[26];
m_gchArray[27].Target = m_objectArray[27];
m_gchArray[28].Target = m_objectArray[28];
m_gchArray[29].Target = m_objectArray[29];
m_gchArray[30].Target = m_objectArray[30];
m_gchArray[31].Target = m_objectArray[31];
m_gchArray[32].Target = m_objectArray[32];
m_gchArray[33].Target = m_objectArray[33];
m_gchArray[34].Target = m_objectArray[34];
m_gchArray[35].Target = m_objectArray[35];
m_gchArray[36].Target = m_objectArray[36];
m_gchArray[37].Target = m_objectArray[37];
m_gchArray[38].Target = m_objectArray[38];
m_gchArray[39].Target = m_objectArray[39];
m_gchArray[40].Target = m_objectArray[40];
m_gchArray[41].Target = m_objectArray[41];
m_gchArray[42].Target = m_objectArray[42];
m_gchArray[43].Target = m_objectArray[43];
m_gchArray[44].Target = m_objectArray[44];
m_gchArray[45].Target = m_objectArray[45];
m_gchArray[46].Target = m_objectArray[46];
m_gchArray[47].Target = m_objectArray[47];
m_gchArray[48].Target = m_objectArray[48];
m_gchArray[49].Target = m_objectArray[49];
}
}
public void GetTargetTest()
{
Init();
Object o = null;
for (long i = 0; i < m_numIters; i++)
{
o = m_gchArray[0].Target;
o = m_gchArray[1].Target;
o = m_gchArray[2].Target;
o = m_gchArray[3].Target;
o = m_gchArray[4].Target;
o = m_gchArray[5].Target;
o = m_gchArray[6].Target;
o = m_gchArray[7].Target;
o = m_gchArray[8].Target;
o = m_gchArray[9].Target;
o = m_gchArray[10].Target;
o = m_gchArray[11].Target;
o = m_gchArray[12].Target;
o = m_gchArray[13].Target;
o = m_gchArray[14].Target;
o = m_gchArray[15].Target;
o = m_gchArray[16].Target;
o = m_gchArray[17].Target;
o = m_gchArray[18].Target;
o = m_gchArray[19].Target;
o = m_gchArray[20].Target;
o = m_gchArray[21].Target;
o = m_gchArray[22].Target;
o = m_gchArray[23].Target;
o = m_gchArray[24].Target;
o = m_gchArray[25].Target;
o = m_gchArray[26].Target;
o = m_gchArray[27].Target;
o = m_gchArray[28].Target;
o = m_gchArray[29].Target;
o = m_gchArray[30].Target;
o = m_gchArray[31].Target;
o = m_gchArray[32].Target;
o = m_gchArray[33].Target;
o = m_gchArray[34].Target;
o = m_gchArray[35].Target;
o = m_gchArray[36].Target;
o = m_gchArray[37].Target;
o = m_gchArray[38].Target;
o = m_gchArray[39].Target;
o = m_gchArray[40].Target;
o = m_gchArray[41].Target;
o = m_gchArray[42].Target;
o = m_gchArray[43].Target;
o = m_gchArray[44].Target;
o = m_gchArray[45].Target;
o = m_gchArray[46].Target;
o = m_gchArray[47].Target;
o = m_gchArray[48].Target;
o = m_gchArray[49].Target;
}
GC.KeepAlive(o);
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
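For illustration, here is a minimal, self-contained sketch of the call-site pattern this paragraph describes. Only the names `featureSIMD` and `supportSIMDTypes()` come from the description; the `Compiler` struct and everything else below are invented for the example and are not the actual JIT sources.

```cpp
// Hypothetical illustration only -- not code from the PR diff.
#include <cstdio>

struct Compiler
{
    bool featureSIMD   = false; // can be cleared via COMPlus_FeatureSIMD=0
    bool targetIsArm64 = true;

    // Accessor that call sites should prefer over the raw flag: on Arm64 the
    // SIMD path must stay enabled so ABI handling remains correct.
    bool supportSIMDTypes() const { return targetIsArm64 || featureSIMD; }
};

int main()
{
    Compiler comp;

    // Old pattern: checks the raw flag and silently skips SIMD handling on Arm64.
    if (comp.featureSIMD)
        std::puts("featureSIMD check: SIMD path taken");

    // New pattern: the accessor keeps the SIMD path alive on Arm64.
    if (comp.supportSIMDTypes())
        std::puts("supportSIMDTypes() check: SIMD path taken");

    return 0;
}
```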
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/libraries/System.Data.Common/src/System/Data/XmlReadMode.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Data
{
public enum XmlReadMode
{
Auto = 0,
ReadSchema = 1,
IgnoreSchema = 2,
InferSchema = 3,
DiffGram = 4,
Fragment = 5,
InferTypedSchema = 6
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
namespace System.Data
{
public enum XmlReadMode
{
Auto = 0,
ReadSchema = 1,
IgnoreSchema = 2,
InferSchema = 3,
DiffGram = 4,
Fragment = 5,
InferTypedSchema = 6
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/tools/aot/ILCompiler.MetadataTransform/Internal/Metadata/NativeFormat/Writer/MdBinaryWriter.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.IO;
using System.Collections.Generic;
using System.Reflection;
using System.Text;
using Internal.LowLevelLinq;
using Internal.NativeFormat;
using Debug = System.Diagnostics.Debug;
namespace Internal.Metadata.NativeFormat.Writer
{
internal static partial class MdBinaryWriter
{
public static void Write(this NativeWriter writer, bool value)
{
writer.WriteUInt8((byte)(value ? 1 : 0));
}
public static void Write(this NativeWriter writer, byte value)
{
writer.WriteUInt8(value);
}
public static void Write(this NativeWriter writer, sbyte value)
{
writer.WriteUInt8((byte)value);
}
public static void Write(this NativeWriter writer, short value)
{
writer.WriteSigned(value);
}
public static void Write(this NativeWriter writer, ushort value)
{
writer.WriteUnsigned(value);
}
public static void Write(this NativeWriter writer, int value)
{
writer.WriteSigned(value);
}
public static void Write(this NativeWriter writer, uint value)
{
writer.WriteUnsigned(value);
}
public static void Write(this NativeWriter writer, ulong value)
{
writer.WriteUnsignedLong(value);
}
public static void Write(this NativeWriter writer, long value)
{
writer.WriteSignedLong(value);
}
public static void Write(this NativeWriter writer, string value)
{
Debug.Assert(value != null);
writer.WriteString(value);
}
public static void Write(this NativeWriter writer, char value)
{
writer.WriteUnsigned((uint)value);
}
public static void Write(this NativeWriter writer, float value)
{
writer.WriteFloat(value);
}
public static void Write(this NativeWriter writer, double value)
{
writer.WriteDouble(value);
}
public static void Write(this NativeWriter writer, MetadataRecord record)
{
if (record != null)
writer.WriteUnsigned((uint)record.HandleType | (uint)(record.HandleOffset << 8));
else
writer.WriteUnsigned(0);
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.IO;
using System.Collections.Generic;
using System.Reflection;
using System.Text;
using Internal.LowLevelLinq;
using Internal.NativeFormat;
using Debug = System.Diagnostics.Debug;
namespace Internal.Metadata.NativeFormat.Writer
{
internal static partial class MdBinaryWriter
{
public static void Write(this NativeWriter writer, bool value)
{
writer.WriteUInt8((byte)(value ? 1 : 0));
}
public static void Write(this NativeWriter writer, byte value)
{
writer.WriteUInt8(value);
}
public static void Write(this NativeWriter writer, sbyte value)
{
writer.WriteUInt8((byte)value);
}
public static void Write(this NativeWriter writer, short value)
{
writer.WriteSigned(value);
}
public static void Write(this NativeWriter writer, ushort value)
{
writer.WriteUnsigned(value);
}
public static void Write(this NativeWriter writer, int value)
{
writer.WriteSigned(value);
}
public static void Write(this NativeWriter writer, uint value)
{
writer.WriteUnsigned(value);
}
public static void Write(this NativeWriter writer, ulong value)
{
writer.WriteUnsignedLong(value);
}
public static void Write(this NativeWriter writer, long value)
{
writer.WriteSignedLong(value);
}
public static void Write(this NativeWriter writer, string value)
{
Debug.Assert(value != null);
writer.WriteString(value);
}
public static void Write(this NativeWriter writer, char value)
{
writer.WriteUnsigned((uint)value);
}
public static void Write(this NativeWriter writer, float value)
{
writer.WriteFloat(value);
}
public static void Write(this NativeWriter writer, double value)
{
writer.WriteDouble(value);
}
public static void Write(this NativeWriter writer, MetadataRecord record)
{
if (record != null)
writer.WriteUnsigned((uint)record.HandleType | (uint)(record.HandleOffset << 8));
else
writer.WriteUnsigned(0);
}
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/nativeaot/System.Private.CoreLib/src/System/Runtime/TypeLoaderExports.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using Internal.Runtime.Augments;
using System.Diagnostics;
using System.Threading;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace System.Runtime
{
[ReflectionBlocked]
public static class TypeLoaderExports
{
public static IntPtr GetThreadStaticsForDynamicType(int index)
{
IntPtr result = RuntimeImports.RhGetThreadLocalStorageForDynamicType(index, 0, 0);
if (result != IntPtr.Zero)
return result;
int numTlsCells;
int tlsStorageSize = RuntimeAugments.TypeLoaderCallbacks.GetThreadStaticsSizeForDynamicType(index, out numTlsCells);
result = RuntimeImports.RhGetThreadLocalStorageForDynamicType(index, tlsStorageSize, numTlsCells);
if (result == IntPtr.Zero)
throw new OutOfMemoryException();
return result;
}
public static unsafe void ActivatorCreateInstanceAny(ref object ptrToData, IntPtr pEETypePtr)
{
EETypePtr pEEType = new EETypePtr(pEETypePtr);
if (pEEType.IsValueType)
{
// Nothing else to do for value types.
return;
}
// For reference types, we need to:
// 1- Allocate the new object
// 2- Call its default ctor
// 3- Update ptrToData to point to that newly allocated object
ptrToData = RuntimeImports.RhNewObject(pEEType);
Entry entry = LookupInCache(s_cache, pEETypePtr, pEETypePtr);
if (entry == null)
{
entry = CacheMiss(pEETypePtr, pEETypePtr,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult) =>
{
IntPtr result = RuntimeAugments.TypeLoaderCallbacks.TryGetDefaultConstructorForType(new RuntimeTypeHandle(new EETypePtr(context)));
if (result == IntPtr.Zero)
result = RuntimeAugments.GetFallbackDefaultConstructor();
return result;
});
}
RawCalliHelper.Call(entry.Result, ptrToData);
}
//
// Generic lookup cache
//
private class Entry
{
public IntPtr Context;
public IntPtr Signature;
public IntPtr Result;
public IntPtr AuxResult;
public Entry Next;
}
// Initialize the cache eagerly to avoid null checks.
// Use array with just single element to make this pay-for-play. The actual cache will be allocated only
// once the lazy lookups are actually needed.
private static Entry[] s_cache;
private static Lock s_lock;
private static GCHandle s_previousCache;
internal static void Initialize()
{
s_cache = new Entry[1];
}
public static IntPtr GenericLookup(IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return entry.Result;
}
public static void GenericLookupAndCallCtor(object arg, IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
RawCalliHelper.Call(entry.Result, arg);
}
public static object GenericLookupAndAllocObject(IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return RawCalliHelper.Call<object>(entry.Result, entry.AuxResult);
}
public static object GenericLookupAndAllocArray(IntPtr context, IntPtr arg, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return RawCalliHelper.Call<object>(entry.Result, entry.AuxResult, arg);
}
public static void GenericLookupAndCheckArrayElemType(IntPtr context, object arg, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
RawCalliHelper.Call(entry.Result, entry.AuxResult, arg);
}
public static object GenericLookupAndCast(object arg, IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return RawCalliHelper.Call<object>(entry.Result, arg, entry.AuxResult);
}
public static IntPtr UpdateTypeFloatingDictionary(IntPtr eetypePtr, IntPtr dictionaryPtr)
{
// No caching needed. Update is in-place, and happens once per dictionary
return RuntimeAugments.TypeLoaderCallbacks.UpdateFloatingDictionary(eetypePtr, dictionaryPtr);
}
public static IntPtr UpdateMethodFloatingDictionary(IntPtr dictionaryPtr)
{
// No caching needed. Update is in-place, and happens once per dictionary
return RuntimeAugments.TypeLoaderCallbacks.UpdateFloatingDictionary(dictionaryPtr, dictionaryPtr);
}
public static unsafe IntPtr GetDelegateThunk(object delegateObj, int whichThunk)
{
Entry entry = LookupInCache(s_cache, (IntPtr)delegateObj.MethodTable, new IntPtr(whichThunk));
if (entry == null)
{
entry = CacheMiss((IntPtr)delegateObj.MethodTable, new IntPtr(whichThunk),
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult)
=> RuntimeAugments.TypeLoaderCallbacks.GetDelegateThunk((Delegate)contextObject, (int)signature),
delegateObj);
}
return entry.Result;
}
public static unsafe IntPtr GVMLookupForSlot(object obj, RuntimeMethodHandle slot)
{
Entry entry = LookupInCache(s_cache, (IntPtr)obj.MethodTable, *(IntPtr*)&slot);
if (entry == null)
{
entry = CacheMiss((IntPtr)obj.MethodTable, *(IntPtr*)&slot,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult)
=> Internal.Runtime.CompilerServices.GenericVirtualMethodSupport.GVMLookupForSlot(new RuntimeTypeHandle(new EETypePtr(context)), *(RuntimeMethodHandle*)&signature));
}
return entry.Result;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static unsafe IntPtr OpenInstanceMethodLookup(IntPtr openResolver, object obj)
{
Entry entry = LookupInCache(s_cache, (IntPtr)obj.MethodTable, openResolver);
if (entry == null)
{
entry = CacheMiss((IntPtr)obj.MethodTable, openResolver,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult)
=> Internal.Runtime.CompilerServices.OpenMethodResolver.ResolveMethodWorker(signature, contextObject),
obj);
}
return entry.Result;
}
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
private static Entry LookupInCache(Entry[] cache, IntPtr context, IntPtr signature)
{
int key = ((context.GetHashCode() >> 4) ^ signature.GetHashCode()) & (cache.Length - 1);
Entry entry = cache[key];
while (entry != null)
{
if (entry.Context == context && entry.Signature == signature)
break;
entry = entry.Next;
}
return entry;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static IntPtr RuntimeCacheLookupInCache(IntPtr context, IntPtr signature, RuntimeObjectFactory factory, object contextObject, out IntPtr auxResult)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature, factory, contextObject);
}
auxResult = entry.AuxResult;
return entry.Result;
}
private static Entry CacheMiss(IntPtr ctx, IntPtr sig)
{
return CacheMiss(ctx, sig,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult) =>
RuntimeAugments.TypeLoaderCallbacks.GenericLookupFromContextAndSignature(context, signature, out auxResult)
);
}
private static unsafe Entry CacheMiss(IntPtr context, IntPtr signature, RuntimeObjectFactory factory, object contextObject = null)
{
IntPtr result = IntPtr.Zero, auxResult = IntPtr.Zero;
bool previouslyCached = false;
//
// Try to find the entry in the previous version of the cache that is kept alive by weak reference
//
if (s_previousCache.IsAllocated)
{
Entry[]? previousCache = (Entry[]?)s_previousCache.Target;
if (previousCache != null)
{
Entry previousEntry = LookupInCache(previousCache, context, signature);
if (previousEntry != null)
{
result = previousEntry.Result;
auxResult = previousEntry.AuxResult;
previouslyCached = true;
}
}
}
//
// Call into the type loader to compute the target
//
if (!previouslyCached)
{
result = factory(context, signature, contextObject, ref auxResult);
}
//
// Update the cache under the lock
//
if (s_lock == null)
Interlocked.CompareExchange(ref s_lock, new Lock(), null);
s_lock.Acquire();
try
{
// Avoid duplicate entries
Entry existingEntry = LookupInCache(s_cache, context, signature);
if (existingEntry != null)
return existingEntry;
// Resize cache as necessary
Entry[] cache = ResizeCacheForNewEntryAsNecessary();
int key = ((context.GetHashCode() >> 4) ^ signature.GetHashCode()) & (cache.Length - 1);
Entry newEntry = new Entry() { Context = context, Signature = signature, Result = result, AuxResult = auxResult, Next = cache[key] };
cache[key] = newEntry;
return newEntry;
}
finally
{
s_lock.Release();
}
}
//
// Parameters and state used by generic lookup cache resizing algorithm
//
private const int InitialCacheSize = 128; // MUST BE A POWER OF TWO
private const int DefaultCacheSize = 1024;
private const int MaximumCacheSize = 128 * 1024;
private static long s_tickCountOfLastOverflow;
private static int s_entries;
private static bool s_roundRobinFlushing;
private static Entry[] ResizeCacheForNewEntryAsNecessary()
{
Entry[] cache = s_cache;
if (cache.Length < InitialCacheSize)
{
// Start with small cache size so that the cache entries used by startup one-time only initialization will get flushed soon
return s_cache = new Entry[InitialCacheSize];
}
int entries = s_entries++;
// If the cache has spare space, we are done
if (2 * entries < cache.Length)
{
if (s_roundRobinFlushing)
{
cache[2 * entries] = null;
cache[2 * entries + 1] = null;
}
return cache;
}
//
            // Now the cache is overflowing. We need to decide whether to resize it or start flushing the old entries instead
//
// Start over counting the entries
s_entries = 0;
// See how long it has been since the last time the cache was overflowing
long tickCount = Environment.TickCount64;
long tickCountSinceLastOverflow = tickCount - s_tickCountOfLastOverflow;
s_tickCountOfLastOverflow = tickCount;
bool shrinkCache = false;
bool growCache = false;
if (cache.Length < DefaultCacheSize)
{
                // If the cache has not reached the default size, just grow it without thinking about it much
growCache = true;
}
else
{
if (tickCountSinceLastOverflow < cache.Length / 128)
{
// If the fill rate of the cache is faster than ~0.01ms per entry, grow it
if (cache.Length < MaximumCacheSize)
growCache = true;
}
else
if (tickCountSinceLastOverflow > cache.Length * 16)
{
// If the fill rate of the cache is slower than 16ms per entry, shrink it
if (cache.Length > DefaultCacheSize)
shrinkCache = true;
}
// Otherwise, keep the current size and just keep flushing the entries round robin
}
if (growCache || shrinkCache)
{
s_roundRobinFlushing = false;
                // Keep the reference to the old cache in a weak handle. We will try to use it to avoid
// hitting the type loader until GC collects it.
if (s_previousCache.IsAllocated)
{
s_previousCache.Target = cache;
}
else
{
s_previousCache = GCHandle.Alloc(cache, GCHandleType.Weak);
}
return s_cache = new Entry[shrinkCache ? (cache.Length / 2) : (cache.Length * 2)];
}
else
{
s_roundRobinFlushing = true;
return cache;
}
}
}
[ReflectionBlocked]
public delegate IntPtr RuntimeObjectFactory(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult);
internal static unsafe class RawCalliHelper
{
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static void Call(System.IntPtr pfn, ref byte data)
=> ((delegate*<ref byte, void>)pfn)(ref data);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, IntPtr arg)
=> ((delegate*<IntPtr, T>)pfn)(arg);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static void Call(System.IntPtr pfn, object arg)
=> ((delegate*<object, void>)pfn)(arg);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, IntPtr arg1, IntPtr arg2)
=> ((delegate*<IntPtr, IntPtr, T>)pfn)(arg1, arg2);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, IntPtr arg1, IntPtr arg2, object arg3, out IntPtr arg4)
=> ((delegate*<IntPtr, IntPtr, object, out IntPtr, T>)pfn)(arg1, arg2, arg3, out arg4);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static void Call(System.IntPtr pfn, IntPtr arg1, object arg2)
=> ((delegate*<IntPtr, object, void>)pfn)(arg1, arg2);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, object arg1, IntPtr arg2)
=> ((delegate*<object, IntPtr, T>)pfn)(arg1, arg2);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(IntPtr pfn, string[] arg0)
=> ((delegate*<string[], T>)pfn)(arg0);
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using Internal.Runtime.Augments;
using System.Diagnostics;
using System.Threading;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace System.Runtime
{
[ReflectionBlocked]
public static class TypeLoaderExports
{
public static IntPtr GetThreadStaticsForDynamicType(int index)
{
IntPtr result = RuntimeImports.RhGetThreadLocalStorageForDynamicType(index, 0, 0);
if (result != IntPtr.Zero)
return result;
int numTlsCells;
int tlsStorageSize = RuntimeAugments.TypeLoaderCallbacks.GetThreadStaticsSizeForDynamicType(index, out numTlsCells);
result = RuntimeImports.RhGetThreadLocalStorageForDynamicType(index, tlsStorageSize, numTlsCells);
if (result == IntPtr.Zero)
throw new OutOfMemoryException();
return result;
}
public static unsafe void ActivatorCreateInstanceAny(ref object ptrToData, IntPtr pEETypePtr)
{
EETypePtr pEEType = new EETypePtr(pEETypePtr);
if (pEEType.IsValueType)
{
// Nothing else to do for value types.
return;
}
// For reference types, we need to:
// 1- Allocate the new object
// 2- Call its default ctor
// 3- Update ptrToData to point to that newly allocated object
ptrToData = RuntimeImports.RhNewObject(pEEType);
Entry entry = LookupInCache(s_cache, pEETypePtr, pEETypePtr);
if (entry == null)
{
entry = CacheMiss(pEETypePtr, pEETypePtr,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult) =>
{
IntPtr result = RuntimeAugments.TypeLoaderCallbacks.TryGetDefaultConstructorForType(new RuntimeTypeHandle(new EETypePtr(context)));
if (result == IntPtr.Zero)
result = RuntimeAugments.GetFallbackDefaultConstructor();
return result;
});
}
RawCalliHelper.Call(entry.Result, ptrToData);
}
//
// Generic lookup cache
//
private class Entry
{
public IntPtr Context;
public IntPtr Signature;
public IntPtr Result;
public IntPtr AuxResult;
public Entry Next;
}
// Initialize the cache eagerly to avoid null checks.
// Use array with just single element to make this pay-for-play. The actual cache will be allocated only
// once the lazy lookups are actually needed.
private static Entry[] s_cache;
private static Lock s_lock;
private static GCHandle s_previousCache;
internal static void Initialize()
{
s_cache = new Entry[1];
}
public static IntPtr GenericLookup(IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return entry.Result;
}
public static void GenericLookupAndCallCtor(object arg, IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
RawCalliHelper.Call(entry.Result, arg);
}
public static object GenericLookupAndAllocObject(IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return RawCalliHelper.Call<object>(entry.Result, entry.AuxResult);
}
public static object GenericLookupAndAllocArray(IntPtr context, IntPtr arg, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return RawCalliHelper.Call<object>(entry.Result, entry.AuxResult, arg);
}
public static void GenericLookupAndCheckArrayElemType(IntPtr context, object arg, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
RawCalliHelper.Call(entry.Result, entry.AuxResult, arg);
}
public static object GenericLookupAndCast(object arg, IntPtr context, IntPtr signature)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature);
}
return RawCalliHelper.Call<object>(entry.Result, arg, entry.AuxResult);
}
public static IntPtr UpdateTypeFloatingDictionary(IntPtr eetypePtr, IntPtr dictionaryPtr)
{
// No caching needed. Update is in-place, and happens once per dictionary
return RuntimeAugments.TypeLoaderCallbacks.UpdateFloatingDictionary(eetypePtr, dictionaryPtr);
}
public static IntPtr UpdateMethodFloatingDictionary(IntPtr dictionaryPtr)
{
// No caching needed. Update is in-place, and happens once per dictionary
return RuntimeAugments.TypeLoaderCallbacks.UpdateFloatingDictionary(dictionaryPtr, dictionaryPtr);
}
public static unsafe IntPtr GetDelegateThunk(object delegateObj, int whichThunk)
{
Entry entry = LookupInCache(s_cache, (IntPtr)delegateObj.MethodTable, new IntPtr(whichThunk));
if (entry == null)
{
entry = CacheMiss((IntPtr)delegateObj.MethodTable, new IntPtr(whichThunk),
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult)
=> RuntimeAugments.TypeLoaderCallbacks.GetDelegateThunk((Delegate)contextObject, (int)signature),
delegateObj);
}
return entry.Result;
}
public static unsafe IntPtr GVMLookupForSlot(object obj, RuntimeMethodHandle slot)
{
Entry entry = LookupInCache(s_cache, (IntPtr)obj.MethodTable, *(IntPtr*)&slot);
if (entry == null)
{
entry = CacheMiss((IntPtr)obj.MethodTable, *(IntPtr*)&slot,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult)
=> Internal.Runtime.CompilerServices.GenericVirtualMethodSupport.GVMLookupForSlot(new RuntimeTypeHandle(new EETypePtr(context)), *(RuntimeMethodHandle*)&signature));
}
return entry.Result;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static unsafe IntPtr OpenInstanceMethodLookup(IntPtr openResolver, object obj)
{
Entry entry = LookupInCache(s_cache, (IntPtr)obj.MethodTable, openResolver);
if (entry == null)
{
entry = CacheMiss((IntPtr)obj.MethodTable, openResolver,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult)
=> Internal.Runtime.CompilerServices.OpenMethodResolver.ResolveMethodWorker(signature, contextObject),
obj);
}
return entry.Result;
}
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
private static Entry LookupInCache(Entry[] cache, IntPtr context, IntPtr signature)
{
int key = ((context.GetHashCode() >> 4) ^ signature.GetHashCode()) & (cache.Length - 1);
Entry entry = cache[key];
while (entry != null)
{
if (entry.Context == context && entry.Signature == signature)
break;
entry = entry.Next;
}
return entry;
}
[MethodImpl(MethodImplOptions.AggressiveInlining)]
internal static IntPtr RuntimeCacheLookupInCache(IntPtr context, IntPtr signature, RuntimeObjectFactory factory, object contextObject, out IntPtr auxResult)
{
Entry entry = LookupInCache(s_cache, context, signature);
if (entry == null)
{
entry = CacheMiss(context, signature, factory, contextObject);
}
auxResult = entry.AuxResult;
return entry.Result;
}
private static Entry CacheMiss(IntPtr ctx, IntPtr sig)
{
return CacheMiss(ctx, sig,
(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult) =>
RuntimeAugments.TypeLoaderCallbacks.GenericLookupFromContextAndSignature(context, signature, out auxResult)
);
}
private static unsafe Entry CacheMiss(IntPtr context, IntPtr signature, RuntimeObjectFactory factory, object contextObject = null)
{
IntPtr result = IntPtr.Zero, auxResult = IntPtr.Zero;
bool previouslyCached = false;
//
// Try to find the entry in the previous version of the cache that is kept alive by weak reference
//
if (s_previousCache.IsAllocated)
{
Entry[]? previousCache = (Entry[]?)s_previousCache.Target;
if (previousCache != null)
{
Entry previousEntry = LookupInCache(previousCache, context, signature);
if (previousEntry != null)
{
result = previousEntry.Result;
auxResult = previousEntry.AuxResult;
previouslyCached = true;
}
}
}
//
// Call into the type loader to compute the target
//
if (!previouslyCached)
{
result = factory(context, signature, contextObject, ref auxResult);
}
//
// Update the cache under the lock
//
if (s_lock == null)
Interlocked.CompareExchange(ref s_lock, new Lock(), null);
s_lock.Acquire();
try
{
// Avoid duplicate entries
Entry existingEntry = LookupInCache(s_cache, context, signature);
if (existingEntry != null)
return existingEntry;
// Resize cache as necessary
Entry[] cache = ResizeCacheForNewEntryAsNecessary();
int key = ((context.GetHashCode() >> 4) ^ signature.GetHashCode()) & (cache.Length - 1);
Entry newEntry = new Entry() { Context = context, Signature = signature, Result = result, AuxResult = auxResult, Next = cache[key] };
cache[key] = newEntry;
return newEntry;
}
finally
{
s_lock.Release();
}
}
//
// Parameters and state used by generic lookup cache resizing algorithm
//
private const int InitialCacheSize = 128; // MUST BE A POWER OF TWO
private const int DefaultCacheSize = 1024;
private const int MaximumCacheSize = 128 * 1024;
private static long s_tickCountOfLastOverflow;
private static int s_entries;
private static bool s_roundRobinFlushing;
private static Entry[] ResizeCacheForNewEntryAsNecessary()
{
Entry[] cache = s_cache;
if (cache.Length < InitialCacheSize)
{
// Start with small cache size so that the cache entries used by startup one-time only initialization will get flushed soon
return s_cache = new Entry[InitialCacheSize];
}
int entries = s_entries++;
// If the cache has spare space, we are done
if (2 * entries < cache.Length)
{
if (s_roundRobinFlushing)
{
cache[2 * entries] = null;
cache[2 * entries + 1] = null;
}
return cache;
}
//
            // Now the cache is overflowing. We need to decide whether to resize it or start flushing the old entries instead
//
// Start over counting the entries
s_entries = 0;
// See how long it has been since the last time the cache was overflowing
long tickCount = Environment.TickCount64;
long tickCountSinceLastOverflow = tickCount - s_tickCountOfLastOverflow;
s_tickCountOfLastOverflow = tickCount;
bool shrinkCache = false;
bool growCache = false;
if (cache.Length < DefaultCacheSize)
{
                // If the cache has not reached the default size, just grow it without thinking about it much
growCache = true;
}
else
{
if (tickCountSinceLastOverflow < cache.Length / 128)
{
// If the fill rate of the cache is faster than ~0.01ms per entry, grow it
if (cache.Length < MaximumCacheSize)
growCache = true;
}
else
if (tickCountSinceLastOverflow > cache.Length * 16)
{
// If the fill rate of the cache is slower than 16ms per entry, shrink it
if (cache.Length > DefaultCacheSize)
shrinkCache = true;
}
// Otherwise, keep the current size and just keep flushing the entries round robin
}
if (growCache || shrinkCache)
{
s_roundRobinFlushing = false;
                // Keep the reference to the old cache in a weak handle. We will try to use it to avoid
// hitting the type loader until GC collects it.
if (s_previousCache.IsAllocated)
{
s_previousCache.Target = cache;
}
else
{
s_previousCache = GCHandle.Alloc(cache, GCHandleType.Weak);
}
return s_cache = new Entry[shrinkCache ? (cache.Length / 2) : (cache.Length * 2)];
}
else
{
s_roundRobinFlushing = true;
return cache;
}
}
}
[ReflectionBlocked]
public delegate IntPtr RuntimeObjectFactory(IntPtr context, IntPtr signature, object contextObject, ref IntPtr auxResult);
internal static unsafe class RawCalliHelper
{
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static void Call(System.IntPtr pfn, ref byte data)
=> ((delegate*<ref byte, void>)pfn)(ref data);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, IntPtr arg)
=> ((delegate*<IntPtr, T>)pfn)(arg);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static void Call(System.IntPtr pfn, object arg)
=> ((delegate*<object, void>)pfn)(arg);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, IntPtr arg1, IntPtr arg2)
=> ((delegate*<IntPtr, IntPtr, T>)pfn)(arg1, arg2);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, IntPtr arg1, IntPtr arg2, object arg3, out IntPtr arg4)
=> ((delegate*<IntPtr, IntPtr, object, out IntPtr, T>)pfn)(arg1, arg2, arg3, out arg4);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static void Call(System.IntPtr pfn, IntPtr arg1, object arg2)
=> ((delegate*<IntPtr, object, void>)pfn)(arg1, arg2);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(System.IntPtr pfn, object arg1, IntPtr arg2)
=> ((delegate*<object, IntPtr, T>)pfn)(arg1, arg2);
[MethodImplAttribute(MethodImplOptions.AggressiveInlining)]
public static T Call<T>(IntPtr pfn, string[] arg0)
=> ((delegate*<string[], T>)pfn)(arg0);
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/pal/src/libunwind/src/loongarch64/Ginit.c
|
/* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2021 Loongson Technology Corporation Limited
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <stdatomic.h>
#include "unwind_i.h"
#ifdef UNW_REMOTE_ONLY
/* unw_local_addr_space is a NULL pointer in this case. */
unw_addr_space_t unw_local_addr_space;
#else /* !UNW_REMOTE_ONLY */
static struct unw_addr_space local_addr_space;
unw_addr_space_t unw_local_addr_space = &local_addr_space;
static inline void *
uc_addr (ucontext_t *uc, int reg)
{
if (reg >= UNW_LOONGARCH64_R0 && reg <= UNW_LOONGARCH64_R31)
return &uc->uc_mcontext.__gregs[reg - UNW_LOONGARCH64_R0];
else if (reg == UNW_LOONGARCH64_PC)
return &uc->uc_mcontext.__pc;
else
return NULL;
}
# ifdef UNW_LOCAL_ONLY
HIDDEN void *
tdep_uc_addr (ucontext_t *uc, int reg)
{
return uc_addr (uc, reg);
}
# endif /* UNW_LOCAL_ONLY */
static void
put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
{
/* it's a no-op */
}
static int
get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
void *arg)
{
#ifndef UNW_LOCAL_ONLY
# pragma weak _U_dyn_info_list_addr
if (!_U_dyn_info_list_addr)
return -UNW_ENOINFO;
#endif
// Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
*dyn_info_list_addr = _U_dyn_info_list_addr ();
return 0;
}
#define PAGE_SIZE 4096
#define PAGE_START(a) ((a) & ~(PAGE_SIZE-1))
static int mem_validate_pipe[2] = {-1, -1};
#ifdef HAVE_PIPE2
static inline void
do_pipe2 (int pipefd[2])
{
pipe2 (pipefd, O_CLOEXEC | O_NONBLOCK);
}
#else
static inline void
set_pipe_flags (int fd)
{
int fd_flags = fcntl (fd, F_GETFD, 0);
int status_flags = fcntl (fd, F_GETFL, 0);
fd_flags |= FD_CLOEXEC;
fcntl (fd, F_SETFD, fd_flags);
status_flags |= O_NONBLOCK;
fcntl (fd, F_SETFL, status_flags);
}
static inline void
do_pipe2 (int pipefd[2])
{
pipe (pipefd);
set_pipe_flags(pipefd[0]);
set_pipe_flags(pipefd[1]);
}
#endif
static inline void
open_pipe (void)
{
if (mem_validate_pipe[0] != -1)
close (mem_validate_pipe[0]);
if (mem_validate_pipe[1] != -1)
close (mem_validate_pipe[1]);
do_pipe2 (mem_validate_pipe);
}
ALWAYS_INLINE
static int
write_validate (void *addr)
{
int ret = -1;
ssize_t bytes = 0;
do
{
char buf;
bytes = read (mem_validate_pipe[0], &buf, 1);
}
while ( errno == EINTR );
int valid_read = (bytes > 0 || errno == EAGAIN || errno == EWOULDBLOCK);
if (!valid_read)
{
// re-open closed pipe
open_pipe ();
}
do
{
ret = write (mem_validate_pipe[1], addr, 1);
}
while ( errno == EINTR );
return ret;
}
static int (*mem_validate_func) (void *addr, size_t len);
static int msync_validate (void *addr, size_t len)
{
if (msync (addr, len, MS_ASYNC) != 0)
{
return -1;
}
return write_validate (addr);
}
#ifdef HAVE_MINCORE
static int mincore_validate (void *addr, size_t len)
{
unsigned char mvec[2]; /* Unaligned access may cross page boundary */
/* mincore could fail with EAGAIN but we conservatively return -1
instead of looping. */
if (mincore (addr, len, (unsigned char *)mvec) != 0)
{
return -1;
}
return write_validate (addr);
}
#endif
/* Initialise memory validation method. On linux kernels <2.6.21,
mincore() returns incorrect value for MAP_PRIVATE mappings,
such as stacks. If mincore() was available at compile time,
check if we can actually use it. If not, use msync() instead. */
HIDDEN void
tdep_init_mem_validate (void)
{
open_pipe ();
#ifdef HAVE_MINCORE
unsigned char present = 1;
unw_word_t addr = PAGE_START((unw_word_t)&present);
unsigned char mvec[1];
int ret;
while ((ret = mincore ((void*)addr, PAGE_SIZE, (unsigned char *)mvec)) == -1 &&
errno == EAGAIN) {}
if (ret == 0)
{
Debug(1, "using mincore to validate memory\n");
mem_validate_func = mincore_validate;
}
else
#endif
{
Debug(1, "using msync to validate memory\n");
mem_validate_func = msync_validate;
}
}
/* Cache of already validated addresses */
#define NLGA 4
#if defined(HAVE___CACHE_PER_THREAD) && HAVE___CACHE_PER_THREAD
// thread-local variant
static _Thread_local unw_word_t last_good_addr[NLGA];
static _Thread_local int lga_victim;
static int
is_cached_valid_mem(unw_word_t addr)
{
int i;
for (i = 0; i < NLGA; i++)
{
if (addr == last_good_addr[i])
return 1;
}
return 0;
}
static void
cache_valid_mem(unw_word_t addr)
{
int i, victim;
victim = lga_victim;
for (i = 0; i < NLGA; i++) {
if (last_good_addr[victim] == 0) {
last_good_addr[victim] = addr;
return;
}
victim = (victim + 1) % NLGA;
}
/* All slots full. Evict the victim. */
last_good_addr[victim] = addr;
victim = (victim + 1) % NLGA;
lga_victim = victim;
}
#else
// global, thread safe variant
static _Atomic unw_word_t last_good_addr[NLGA];
static _Atomic int lga_victim;
static int
is_cached_valid_mem(unw_word_t addr)
{
int i;
for (i = 0; i < NLGA; i++)
{
if (addr == atomic_load(&last_good_addr[i]))
return 1;
}
return 0;
}
static void
cache_valid_mem(unw_word_t addr)
{
int i, victim;
victim = atomic_load(&lga_victim);
unw_word_t zero = 0;
for (i = 0; i < NLGA; i++) {
if (atomic_compare_exchange_strong(&last_good_addr[victim], &zero, addr)) {
return;
}
victim = (victim + 1) % NLGA;
}
/* All slots full. Evict the victim. */
atomic_store(&last_good_addr[victim], addr);
victim = (victim + 1) % NLGA;
atomic_store(&lga_victim, victim);
}
#endif
static int
validate_mem (unw_word_t addr)
{
size_t len;
if (PAGE_START(addr + sizeof (unw_word_t) - 1) == PAGE_START(addr))
len = PAGE_SIZE;
else
len = PAGE_SIZE * 2;
addr = PAGE_START(addr);
if (addr == 0)
return -1;
if (is_cached_valid_mem(addr))
return 0;
if (mem_validate_func ((void *) addr, len) == -1)
return -1;
cache_valid_mem(addr);
return 0;
}
static int
access_mem (unw_addr_space_t as, unw_word_t addr, unw_word_t *val, int write,
void *arg)
{
if (unlikely (write))
{
Debug (16, "mem[%llx] <- %llx\n", (long long) addr, (long long) *val);
*(unw_word_t *) (intptr_t) addr = *val;
}
else
{
/* validate address */
const struct cursor *c = (const struct cursor *)arg;
if (likely (c != NULL) && unlikely (c->validate)
&& unlikely (validate_mem (addr))) {
Debug (16, "mem[%016lx] -> invalid\n", addr);
return -1;
}
*val = *(unw_word_t *) (intptr_t) addr;
Debug (16, "mem[%llx] -> %llx\n", (long long) addr, (long long) *val);
}
return 0;
}
static int
access_reg (unw_addr_space_t as, unw_regnum_t reg, unw_word_t *val, int write,
void *arg)
{
unw_word_t *addr;
ucontext_t *uc = ((struct cursor *)arg)->uc;
if (unw_is_fpreg (reg))
goto badreg;
Debug (16, "reg = %s\n", unw_regname (reg));
if (!(addr = uc_addr (uc, reg)))
goto badreg;
if (write)
{
*(unw_word_t *) (intptr_t) addr = (unw_word_t) *val;
Debug (12, "%s <- %llx\n", unw_regname (reg), (long long) *val);
}
else
{
*val = (unw_word_t) *(unw_word_t *) (intptr_t) addr;
Debug (12, "%s -> %llx\n", unw_regname (reg), (long long) *val);
}
return 0;
badreg:
Debug (1, "bad register number %u\n", reg);
return -UNW_EBADREG;
}
static int
access_fpreg (unw_addr_space_t as, unw_regnum_t reg, unw_fpreg_t *val,
int write, void *arg)
{
return 0;
}
static int
get_static_proc_name (unw_addr_space_t as, unw_word_t ip,
char *buf, size_t buf_len, unw_word_t *offp,
void *arg)
{
return elf_w (get_proc_name) (as, getpid (), ip, buf, buf_len, offp);
}
HIDDEN void
loongarch64_local_addr_space_init (void)
{
memset (&local_addr_space, 0, sizeof (local_addr_space));
local_addr_space.caching_policy = UNW_CACHE_GLOBAL;
local_addr_space.acc.find_proc_info = dwarf_find_proc_info;
local_addr_space.acc.put_unwind_info = put_unwind_info;
local_addr_space.acc.get_dyn_info_list_addr = get_dyn_info_list_addr;
local_addr_space.acc.access_mem = access_mem;
local_addr_space.acc.access_reg = access_reg;
local_addr_space.acc.access_fpreg = access_fpreg;
local_addr_space.acc.resume = loongarch64_local_resume;
local_addr_space.acc.get_proc_name = get_static_proc_name;
unw_flush_cache (&local_addr_space, 0, 0);
}
#endif /* !UNW_REMOTE_ONLY */
|
/* libunwind - a platform-independent unwind library
Copyright (C) 2008 CodeSourcery
Copyright (C) 2021 Loongson Technology Corporation Limited
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <stdatomic.h>
#include "unwind_i.h"
#ifdef UNW_REMOTE_ONLY
/* unw_local_addr_space is a NULL pointer in this case. */
unw_addr_space_t unw_local_addr_space;
#else /* !UNW_REMOTE_ONLY */
static struct unw_addr_space local_addr_space;
unw_addr_space_t unw_local_addr_space = &local_addr_space;
static inline void *
uc_addr (ucontext_t *uc, int reg)
{
if (reg >= UNW_LOONGARCH64_R0 && reg <= UNW_LOONGARCH64_R31)
return &uc->uc_mcontext.__gregs[reg - UNW_LOONGARCH64_R0];
else if (reg == UNW_LOONGARCH64_PC)
return &uc->uc_mcontext.__pc;
else
return NULL;
}
# ifdef UNW_LOCAL_ONLY
HIDDEN void *
tdep_uc_addr (ucontext_t *uc, int reg)
{
return uc_addr (uc, reg);
}
# endif /* UNW_LOCAL_ONLY */
static void
put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg)
{
/* it's a no-op */
}
static int
get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr,
void *arg)
{
#ifndef UNW_LOCAL_ONLY
# pragma weak _U_dyn_info_list_addr
if (!_U_dyn_info_list_addr)
return -UNW_ENOINFO;
#endif
// Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so.
*dyn_info_list_addr = _U_dyn_info_list_addr ();
return 0;
}
#define PAGE_SIZE 4096
#define PAGE_START(a) ((a) & ~(PAGE_SIZE-1))
static int mem_validate_pipe[2] = {-1, -1};
#ifdef HAVE_PIPE2
static inline void
do_pipe2 (int pipefd[2])
{
pipe2 (pipefd, O_CLOEXEC | O_NONBLOCK);
}
#else
static inline void
set_pipe_flags (int fd)
{
int fd_flags = fcntl (fd, F_GETFD, 0);
int status_flags = fcntl (fd, F_GETFL, 0);
fd_flags |= FD_CLOEXEC;
fcntl (fd, F_SETFD, fd_flags);
status_flags |= O_NONBLOCK;
fcntl (fd, F_SETFL, status_flags);
}
static inline void
do_pipe2 (int pipefd[2])
{
pipe (pipefd);
set_pipe_flags(pipefd[0]);
set_pipe_flags(pipefd[1]);
}
#endif
static inline void
open_pipe (void)
{
if (mem_validate_pipe[0] != -1)
close (mem_validate_pipe[0]);
if (mem_validate_pipe[1] != -1)
close (mem_validate_pipe[1]);
do_pipe2 (mem_validate_pipe);
}
ALWAYS_INLINE
static int
write_validate (void *addr)
{
int ret = -1;
ssize_t bytes = 0;
do
{
char buf;
bytes = read (mem_validate_pipe[0], &buf, 1);
}
while ( errno == EINTR );
int valid_read = (bytes > 0 || errno == EAGAIN || errno == EWOULDBLOCK);
if (!valid_read)
{
// re-open closed pipe
open_pipe ();
}
do
{
ret = write (mem_validate_pipe[1], addr, 1);
}
while ( errno == EINTR );
return ret;
}
static int (*mem_validate_func) (void *addr, size_t len);
static int msync_validate (void *addr, size_t len)
{
if (msync (addr, len, MS_ASYNC) != 0)
{
return -1;
}
return write_validate (addr);
}
#ifdef HAVE_MINCORE
static int mincore_validate (void *addr, size_t len)
{
unsigned char mvec[2]; /* Unaligned access may cross page boundary */
/* mincore could fail with EAGAIN but we conservatively return -1
instead of looping. */
if (mincore (addr, len, (unsigned char *)mvec) != 0)
{
return -1;
}
return write_validate (addr);
}
#endif
/* Initialise memory validation method. On linux kernels <2.6.21,
mincore() returns incorrect value for MAP_PRIVATE mappings,
such as stacks. If mincore() was available at compile time,
check if we can actually use it. If not, use msync() instead. */
HIDDEN void
tdep_init_mem_validate (void)
{
open_pipe ();
#ifdef HAVE_MINCORE
unsigned char present = 1;
unw_word_t addr = PAGE_START((unw_word_t)&present);
unsigned char mvec[1];
int ret;
while ((ret = mincore ((void*)addr, PAGE_SIZE, (unsigned char *)mvec)) == -1 &&
errno == EAGAIN) {}
if (ret == 0)
{
Debug(1, "using mincore to validate memory\n");
mem_validate_func = mincore_validate;
}
else
#endif
{
Debug(1, "using msync to validate memory\n");
mem_validate_func = msync_validate;
}
}
/* Cache of already validated addresses */
#define NLGA 4
#if defined(HAVE___CACHE_PER_THREAD) && HAVE___CACHE_PER_THREAD
// thread-local variant
static _Thread_local unw_word_t last_good_addr[NLGA];
static _Thread_local int lga_victim;
static int
is_cached_valid_mem(unw_word_t addr)
{
int i;
for (i = 0; i < NLGA; i++)
{
if (addr == last_good_addr[i])
return 1;
}
return 0;
}
static void
cache_valid_mem(unw_word_t addr)
{
int i, victim;
victim = lga_victim;
for (i = 0; i < NLGA; i++) {
if (last_good_addr[victim] == 0) {
last_good_addr[victim] = addr;
return;
}
victim = (victim + 1) % NLGA;
}
/* All slots full. Evict the victim. */
last_good_addr[victim] = addr;
victim = (victim + 1) % NLGA;
lga_victim = victim;
}
#else
// global, thread safe variant
static _Atomic unw_word_t last_good_addr[NLGA];
static _Atomic int lga_victim;
static int
is_cached_valid_mem(unw_word_t addr)
{
int i;
for (i = 0; i < NLGA; i++)
{
if (addr == atomic_load(&last_good_addr[i]))
return 1;
}
return 0;
}
static void
cache_valid_mem(unw_word_t addr)
{
int i, victim;
victim = atomic_load(&lga_victim);
unw_word_t zero = 0;
for (i = 0; i < NLGA; i++) {
if (atomic_compare_exchange_strong(&last_good_addr[victim], &zero, addr)) {
return;
}
victim = (victim + 1) % NLGA;
}
/* All slots full. Evict the victim. */
atomic_store(&last_good_addr[victim], addr);
victim = (victim + 1) % NLGA;
atomic_store(&lga_victim, victim);
}
#endif
static int
validate_mem (unw_word_t addr)
{
size_t len;
if (PAGE_START(addr + sizeof (unw_word_t) - 1) == PAGE_START(addr))
len = PAGE_SIZE;
else
len = PAGE_SIZE * 2;
addr = PAGE_START(addr);
if (addr == 0)
return -1;
if (is_cached_valid_mem(addr))
return 0;
if (mem_validate_func ((void *) addr, len) == -1)
return -1;
cache_valid_mem(addr);
return 0;
}
static int
access_mem (unw_addr_space_t as, unw_word_t addr, unw_word_t *val, int write,
void *arg)
{
if (unlikely (write))
{
Debug (16, "mem[%llx] <- %llx\n", (long long) addr, (long long) *val);
*(unw_word_t *) (intptr_t) addr = *val;
}
else
{
/* validate address */
const struct cursor *c = (const struct cursor *)arg;
if (likely (c != NULL) && unlikely (c->validate)
&& unlikely (validate_mem (addr))) {
Debug (16, "mem[%016lx] -> invalid\n", addr);
return -1;
}
*val = *(unw_word_t *) (intptr_t) addr;
Debug (16, "mem[%llx] -> %llx\n", (long long) addr, (long long) *val);
}
return 0;
}
static int
access_reg (unw_addr_space_t as, unw_regnum_t reg, unw_word_t *val, int write,
void *arg)
{
unw_word_t *addr;
ucontext_t *uc = ((struct cursor *)arg)->uc;
if (unw_is_fpreg (reg))
goto badreg;
Debug (16, "reg = %s\n", unw_regname (reg));
if (!(addr = uc_addr (uc, reg)))
goto badreg;
if (write)
{
*(unw_word_t *) (intptr_t) addr = (unw_word_t) *val;
Debug (12, "%s <- %llx\n", unw_regname (reg), (long long) *val);
}
else
{
*val = (unw_word_t) *(unw_word_t *) (intptr_t) addr;
Debug (12, "%s -> %llx\n", unw_regname (reg), (long long) *val);
}
return 0;
badreg:
Debug (1, "bad register number %u\n", reg);
return -UNW_EBADREG;
}
static int
access_fpreg (unw_addr_space_t as, unw_regnum_t reg, unw_fpreg_t *val,
int write, void *arg)
{
return 0;
}
static int
get_static_proc_name (unw_addr_space_t as, unw_word_t ip,
char *buf, size_t buf_len, unw_word_t *offp,
void *arg)
{
return elf_w (get_proc_name) (as, getpid (), ip, buf, buf_len, offp);
}
HIDDEN void
loongarch64_local_addr_space_init (void)
{
memset (&local_addr_space, 0, sizeof (local_addr_space));
local_addr_space.caching_policy = UNW_CACHE_GLOBAL;
local_addr_space.acc.find_proc_info = dwarf_find_proc_info;
local_addr_space.acc.put_unwind_info = put_unwind_info;
local_addr_space.acc.get_dyn_info_list_addr = get_dyn_info_list_addr;
local_addr_space.acc.access_mem = access_mem;
local_addr_space.acc.access_reg = access_reg;
local_addr_space.acc.access_fpreg = access_fpreg;
local_addr_space.acc.resume = loongarch64_local_resume;
local_addr_space.acc.get_proc_name = get_static_proc_name;
unw_flush_cache (&local_addr_space, 0, 0);
}
#endif /* !UNW_REMOTE_ONLY */
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore cannot be relied on for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
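A minimal illustrative sketch of the helper this description refers to is shown below. This is an assumption based on the description, not the actual PR diff: only `featureSIMD` and `supportSIMDTypes()` are named in the text above, while the `TARGET_ARM64`/`FEATURE_SIMD` guards and the body are guessed for illustration.
// Illustrative sketch only -- not the actual RyuJIT change.
// Assumption: on Arm64, SIMD support must be reported unconditionally so that
// ABI handling keeps working even when COMPlus_FeatureSIMD=0 clears `featureSIMD`.
bool Compiler::supportSIMDTypes()
{
#if defined(TARGET_ARM64)
    // SIMD types are always available on Arm64, independent of COMPlus_FeatureSIMD.
    return true;
#elif defined(FEATURE_SIMD)
    return featureSIMD; // honors COMPlus_FeatureSIMD on other targets
#else
    return false;
#endif
}
Call sites that previously tested `featureSIMD` directly would then call `supportSIMDTypes()` instead.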
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore cannot be relied on for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
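For completeness, the shape of a typical call-site update implied by the description above is sketched here; the surrounding condition is hypothetical, and only the switch from `featureSIMD` to `supportSIMDTypes()` reflects the text.
// Hypothetical call site (illustrative only):
// before: gated on the COMPlus_FeatureSIMD-controlled flag
//     if (featureSIMD && varTypeIsStruct(type))
//     {
//         // classify the struct for SIMD/ABI purposes
//     }
// after: gated on the target-aware helper (see the sketch shown earlier)
//     if (supportSIMDTypes() && varTypeIsStruct(type))
//     {
//         // classify the struct for SIMD/ABI purposes
//     }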
|
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest111/Generated111.il
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated111 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct161`1<T0>
extends [mscorlib]System.ValueType
implements class IBase2`2<class BaseClass0,class BaseClass1>, class IBase2`2<!T0,class BaseClass0>
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "MyStruct161::Method7.1363<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,class BaseClass1>.Method7'<M0>() cil managed noinlining {
.override method instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<[1]>()
ldstr "MyStruct161::Method7.MI.1364<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod328() cil managed noinlining {
ldstr "MyStruct161::ClassMethod328.1366()"
ret
}
.method public hidebysig newslot instance string ClassMethod329() cil managed noinlining {
ldstr "MyStruct161::ClassMethod329.1367()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated111 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct161.T<T0,(valuetype MyStruct161`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct161.T<T0,(valuetype MyStruct161`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct161`1<!!T0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct161`1<!!T0>
callvirt instance string class IBase2`2<!!T0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct161.A<(valuetype MyStruct161`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct161.A<(valuetype MyStruct161`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct161.B<(valuetype MyStruct161`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct161.B<(valuetype MyStruct161`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass1>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass1>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct161`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct161`1<class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod328()
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod329()
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct161`1<class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct161`1<class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct161`1<class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct161`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct161`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct161`1<class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct161`1<class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct161`1<class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod328()
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod329()
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct161`1<class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct161`1<class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct161`1<class BaseClass1>::ToString() pop
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct161`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct161`1<class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.B<valuetype MyStruct161`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct161`1<class BaseClass0>>(!!2,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass0>>(!!1,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_3
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.A<valuetype MyStruct161`1<class BaseClass0>>(!!0,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.locals init (valuetype MyStruct161`1<class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct161`1<class BaseClass1>
.try { ldloc V_4
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_4
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_4
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.B<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.A<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV12
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV13
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.A<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV14
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV15
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV16
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.B<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV17
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct161`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.A<valuetype MyStruct161`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.locals init (valuetype MyStruct161`1<class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct161`1<class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_6
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.B<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod328()
calli default string(object)
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod329()
calli default string(object)
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0> ldnull
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct161`1<class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct161`1<class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::ToString() calli default string(object) pop
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct161`1<class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod328()
calli default string(object)
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod329()
calli default string(object)
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1> ldnull
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance bool valuetype MyStruct161`1<class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct161`1<class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::ToString() calli default string(object) pop
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated111::MethodCallingTest()
call void Generated111::ConstrainedCallsTest()
call void Generated111::StructConstrainedInterfaceCallsTest()
call void Generated111::CalliTest()
ldc.i4 100
ret
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
.assembly extern mscorlib { .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) .ver 4:0:0:0 }
.assembly extern TestFramework { .publickeytoken = ( B0 3F 5F 7F 11 D5 0A 3A ) }
//TYPES IN FORWARDER ASSEMBLIES:
//TEST ASSEMBLY:
.assembly Generated111 { .hash algorithm 0x00008004 }
.assembly extern xunit.core {}
.class public BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void [mscorlib]System.Object::.ctor()
ret
}
}
.class public BaseClass1
extends BaseClass0
{
.method public hidebysig specialname rtspecialname instance void .ctor() cil managed {
ldarg.0
call instance void BaseClass0::.ctor()
ret
}
}
.class public sequential sealed MyStruct161`1<T0>
extends [mscorlib]System.ValueType
implements class IBase2`2<class BaseClass0,class BaseClass1>, class IBase2`2<!T0,class BaseClass0>
{
.pack 0
.size 1
.method public hidebysig newslot virtual instance string Method7<M0>() cil managed noinlining {
ldstr "MyStruct161::Method7.1363<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot virtual instance string 'IBase2<class BaseClass0,class BaseClass1>.Method7'<M0>() cil managed noinlining {
.override method instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<[1]>()
ldstr "MyStruct161::Method7.MI.1364<"
ldtoken !!M0
call class [mscorlib]System.Type [mscorlib]System.Type::GetTypeFromHandle(valuetype [mscorlib]System.RuntimeTypeHandle)
call string [mscorlib]System.String::Concat(object,object)
ldstr ">()"
call string [mscorlib]System.String::Concat(object,object)
ret
}
.method public hidebysig newslot instance string ClassMethod328() cil managed noinlining {
ldstr "MyStruct161::ClassMethod328.1366()"
ret
}
.method public hidebysig newslot instance string ClassMethod329() cil managed noinlining {
ldstr "MyStruct161::ClassMethod329.1367()"
ret
}
.method public hidebysig virtual instance bool Equals(object obj) cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance int32 GetHashCode() cil managed { ldc.i4.0 ret }
.method public hidebysig virtual instance string ToString() cil managed { ldstr "" ret }
}
.class interface public abstract IBase2`2<+T0, -T1>
{
.method public hidebysig newslot abstract virtual instance string Method7<M0>() cil managed { }
}
.class public auto ansi beforefieldinit Generated111 {
.method static void M.BaseClass0<(BaseClass0)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass0<(BaseClass0)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.BaseClass1<(BaseClass1)W>(!!W inst, string exp) cil managed {
.maxstack 5
.locals init (string[] actualResults)
ldc.i4.s 0
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.BaseClass1<(BaseClass1)W>(!!W inst, string exp)"
ldc.i4.s 0
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.T.T<T0,T1,(class IBase2`2<!!T0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<!!T0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.T<T1,(class IBase2`2<class BaseClass0,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.A<(class IBase2`2<class BaseClass0,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.A.B<(class IBase2`2<class BaseClass0,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.T<T1,(class IBase2`2<class BaseClass1,!!T1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,!!T1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.A<(class IBase2`2<class BaseClass1,class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 6
.locals init (string[] actualResults)
ldc.i4.s 1
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.IBase2.B.B<(class IBase2`2<class BaseClass1,class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 1
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. !!W
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct161.T<T0,(valuetype MyStruct161`1<!!T0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct161.T<T0,(valuetype MyStruct161`1<!!T0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct161`1<!!T0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct161`1<!!T0>
callvirt instance string class IBase2`2<!!T0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct161.A<(valuetype MyStruct161`1<class BaseClass0>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct161.A<(valuetype MyStruct161`1<class BaseClass0>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass0>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method static void M.MyStruct161.B<(valuetype MyStruct161`1<class BaseClass1>)W>(!!W 'inst', string exp) cil managed {
.maxstack 7
.locals init (string[] actualResults)
ldc.i4.s 2
newarr string
stloc.s actualResults
ldarg.1
ldstr "M.MyStruct161.B<(valuetype MyStruct161`1<class BaseClass1>)W>(!!W 'inst', string exp)"
ldc.i4.s 2
ldloc.s actualResults
ldc.i4.s 0
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass1>
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
stelem.ref
ldloc.s actualResults
ldc.i4.s 1
ldarga.s 0
constrained. valuetype MyStruct161`1<class BaseClass1>
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
stelem.ref
ldloc.s actualResults
call void [TestFramework]TestFramework::MethodCallTest(string,string,int32,string[])
ret
}
.method public hidebysig static void MethodCallingTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calling Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_1)
ldloca V_1
initobj valuetype MyStruct161`1<class BaseClass0>
ldloca V_1
dup
call instance string valuetype MyStruct161`1<class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod328()
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod329()
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct161`1<class BaseClass0>::Equals(object) pop
dup call instance int32 valuetype MyStruct161`1<class BaseClass0>::GetHashCode() pop
dup call instance string valuetype MyStruct161`1<class BaseClass0>::ToString() pop
pop
ldloc V_1
box valuetype MyStruct161`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_1
box valuetype MyStruct161`1<class BaseClass0>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
.locals init (valuetype MyStruct161`1<class BaseClass1> V_2)
ldloca V_2
initobj valuetype MyStruct161`1<class BaseClass1>
ldloca V_2
dup
call instance string valuetype MyStruct161`1<class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod328()
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup
call instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod329()
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type MyStruct161"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
dup ldnull call instance bool valuetype MyStruct161`1<class BaseClass1>::Equals(object) pop
dup call instance int32 valuetype MyStruct161`1<class BaseClass1>::GetHashCode() pop
dup call instance string valuetype MyStruct161`1<class BaseClass1>::ToString() pop
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldloc V_2
box valuetype MyStruct161`1<class BaseClass1>
dup
callvirt instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
pop
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void ConstrainedCallsTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Constrained Calls Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_3)
ldloca V_3
initobj valuetype MyStruct161`1<class BaseClass0>
.try { ldloc V_3
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct161`1<class BaseClass0>>(!!2,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_3
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass0>>(!!1,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.try { ldloc V_3
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.B<valuetype MyStruct161`1<class BaseClass0>>(!!0,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_3
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct161`1<class BaseClass0>>(!!2,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
.try { ldloc V_3
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass0>>(!!1,string) leave.s LV4
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV4} LV4:
.try { ldloc V_3
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.A<valuetype MyStruct161`1<class BaseClass0>>(!!0,string) leave.s LV5
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV5} LV5:
.locals init (valuetype MyStruct161`1<class BaseClass1> V_4)
ldloca V_4
initobj valuetype MyStruct161`1<class BaseClass1>
.try { ldloc V_4
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV6
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV6} LV6:
.try { ldloc V_4
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV7
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV7} LV7:
.try { ldloc V_4
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#"
call void Generated111::M.IBase2.A.B<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV8
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV8} LV8:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass1,class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV9
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV9} LV9:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV10
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV10} LV10:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.A<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV11
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV11} LV11:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass0,class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV12
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV12} LV12:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV13
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV13} LV13:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.A.A<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV14
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV14} LV14:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.T.T<class BaseClass1,class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!2,string) leave.s LV15
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV15} LV15:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV16
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV16} LV16:
.try { ldloc V_4
ldstr "MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.IBase2.B.B<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV17
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV17} LV17:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void StructConstrainedInterfaceCallsTest() cil managed
{
.maxstack 10
ldstr "===================== Struct Constrained Interface Calls Test ====================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_5)
ldloca V_5
initobj valuetype MyStruct161`1<class BaseClass0>
.try { ldloc V_5
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.T<class BaseClass0,valuetype MyStruct161`1<class BaseClass0>>(!!1,string) leave.s LV0
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV0} LV0:
.try { ldloc V_5
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.A<valuetype MyStruct161`1<class BaseClass0>>(!!0,string) leave.s LV1
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV1} LV1:
.locals init (valuetype MyStruct161`1<class BaseClass1> V_6)
ldloca V_6
initobj valuetype MyStruct161`1<class BaseClass1>
.try { ldloc V_6
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.T<class BaseClass1,valuetype MyStruct161`1<class BaseClass1>>(!!1,string) leave.s LV2
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV2} LV2:
.try { ldloc V_6
ldstr "MyStruct161::Method7.MI.1364<System.Object>()#" +
"MyStruct161::Method7.1363<System.Object>()#"
call void Generated111::M.MyStruct161.B<valuetype MyStruct161`1<class BaseClass1>>(!!0,string) leave.s LV3
} catch [mscorlib]System.Security.VerificationException { pop leave.s LV3} LV3:
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static void CalliTest() cil managed
{
.maxstack 10
.locals init (object V_0)
ldstr "========================== Method Calli Test =========================="
call void [mscorlib]System.Console::WriteLine(string)
.locals init (valuetype MyStruct161`1<class BaseClass0> V_7)
ldloca V_7
initobj valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod328()
calli default string(object)
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::ClassMethod329()
calli default string(object)
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0> ldnull
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance bool valuetype MyStruct161`1<class BaseClass0>::Equals(object) calli default bool(object,object) pop
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance int32 valuetype MyStruct161`1<class BaseClass0>::GetHashCode() calli default int32(object) pop
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7 box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass0>::ToString() calli default string(object) pop
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldloc V_7
box valuetype MyStruct161`1<class BaseClass0>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass0>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
.locals init (valuetype MyStruct161`1<class BaseClass1> V_8)
ldloca V_8
initobj valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod328()
calli default string(object)
ldstr "MyStruct161::ClassMethod328.1366()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::ClassMethod329()
calli default string(object)
ldstr "MyStruct161::ClassMethod329.1367()"
ldstr "valuetype MyStruct161`1<class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1> ldnull
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance bool valuetype MyStruct161`1<class BaseClass1>::Equals(object) calli default bool(object,object) pop
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance int32 valuetype MyStruct161`1<class BaseClass1>::GetHashCode() calli default int32(object) pop
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8 box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string valuetype MyStruct161`1<class BaseClass1>::ToString() calli default string(object) pop
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.MI.1364<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass0,class BaseClass0>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass0,class BaseClass0> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldloc V_8
box valuetype MyStruct161`1<class BaseClass1>
ldvirtftn instance string class IBase2`2<class BaseClass1,class BaseClass1>::Method7<object>()
calli default string(object)
ldstr "MyStruct161::Method7.1363<System.Object>()"
ldstr "class IBase2`2<class BaseClass1,class BaseClass1> on type valuetype MyStruct161`1<class BaseClass1>"
call void [TestFramework]TestFramework::MethodCallTest(string,string,string)
ldstr "========================================================================\n\n"
call void [mscorlib]System.Console::WriteLine(string)
ret
}
.method public hidebysig static int32 Main() cil managed
{
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
.entrypoint
.maxstack 10
call void Generated111::MethodCallingTest()
call void Generated111::ConstrainedCallsTest()
call void Generated111::StructConstrainedInterfaceCallsTest()
call void Generated111::CalliTest()
ldc.i4 100
ret
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for your the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
|
./src/libraries/Common/src/System/Security/Cryptography/ECDiffieHellmanSecurityTransforms.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
using System.Security.Cryptography.Apple;
namespace System.Security.Cryptography
{
internal static partial class ECDiffieHellmanImplementation
{
public sealed partial class ECDiffieHellmanSecurityTransforms : ECDiffieHellman
{
private readonly EccSecurityTransforms _ecc = new EccSecurityTransforms(nameof(ECDiffieHellman));
public ECDiffieHellmanSecurityTransforms()
{
base.KeySize = 521;
}
internal ECDiffieHellmanSecurityTransforms(SafeSecKeyRefHandle publicKey)
{
KeySizeValue = _ecc.SetKeyAndGetSize(SecKeyPair.PublicOnly(publicKey));
}
internal ECDiffieHellmanSecurityTransforms(SafeSecKeyRefHandle publicKey, SafeSecKeyRefHandle privateKey)
{
KeySizeValue = _ecc.SetKeyAndGetSize(SecKeyPair.PublicPrivatePair(publicKey, privateKey));
}
public override KeySizes[] LegalKeySizes
{
get
{
// Return the three sizes that can be explicitly set (for backwards compatibility)
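                    // The two entries below therefore allow exactly 256, 384 (256 + 128), and 521 bits.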
return new[]
{
new KeySizes(minSize: 256, maxSize: 384, skipSize: 128),
new KeySizes(minSize: 521, maxSize: 521, skipSize: 0),
};
}
}
public override int KeySize
{
get { return base.KeySize; }
set
{
if (KeySize == value)
return;
// Set the KeySize before freeing the key so that an invalid value doesn't throw away the key
base.KeySize = value;
_ecc.DisposeKey();
}
}
private void ThrowIfDisposed()
{
_ecc.ThrowIfDisposed();
}
protected override void Dispose(bool disposing)
{
if (disposing)
{
_ecc.Dispose();
}
base.Dispose(disposing);
}
public override ECParameters ExportExplicitParameters(bool includePrivateParameters)
{
throw new PlatformNotSupportedException(SR.Cryptography_ECC_NamedCurvesOnly);
}
public override ECParameters ExportParameters(bool includePrivateParameters)
{
return _ecc.ExportParameters(includePrivateParameters, KeySize);
}
internal bool TryExportDataKeyParameters(bool includePrivateParameters, ref ECParameters ecParameters)
{
return _ecc.TryExportDataKeyParameters(includePrivateParameters, KeySize, ref ecParameters);
}
public override void ImportParameters(ECParameters parameters)
{
KeySizeValue = _ecc.ImportParameters(parameters);
}
public override void ImportEncryptedPkcs8PrivateKey(
ReadOnlySpan<byte> passwordBytes,
ReadOnlySpan<byte> source,
out int bytesRead)
{
ThrowIfDisposed();
base.ImportEncryptedPkcs8PrivateKey(passwordBytes, source, out bytesRead);
}
public override void ImportEncryptedPkcs8PrivateKey(
ReadOnlySpan<char> password,
ReadOnlySpan<byte> source,
out int bytesRead)
{
ThrowIfDisposed();
base.ImportEncryptedPkcs8PrivateKey(password, source, out bytesRead);
}
public override void GenerateKey(ECCurve curve)
{
KeySizeValue = _ecc.GenerateKey(curve);
}
internal SecKeyPair GetKeys()
{
return _ecc.GetOrGenerateKeys(KeySize);
}
public override byte[] DeriveKeyMaterial(ECDiffieHellmanPublicKey otherPartyPublicKey) =>
DeriveKeyFromHash(otherPartyPublicKey, HashAlgorithmName.SHA256, null, null);
public override byte[] DeriveKeyFromHash(
ECDiffieHellmanPublicKey otherPartyPublicKey!!,
HashAlgorithmName hashAlgorithm,
byte[]? secretPrepend,
byte[]? secretAppend)
{
ArgumentException.ThrowIfNullOrEmpty(hashAlgorithm.Name, nameof(hashAlgorithm));
ThrowIfDisposed();
return ECDiffieHellmanDerivation.DeriveKeyFromHash(
otherPartyPublicKey,
hashAlgorithm,
secretPrepend,
secretAppend,
(pubKey, hasher) => DeriveSecretAgreement(pubKey, hasher));
}
public override byte[] DeriveKeyFromHmac(
ECDiffieHellmanPublicKey otherPartyPublicKey!!,
HashAlgorithmName hashAlgorithm,
byte[]? hmacKey,
byte[]? secretPrepend,
byte[]? secretAppend)
{
ArgumentException.ThrowIfNullOrEmpty(hashAlgorithm.Name, nameof(hashAlgorithm));
ThrowIfDisposed();
return ECDiffieHellmanDerivation.DeriveKeyFromHmac(
otherPartyPublicKey,
hashAlgorithm,
hmacKey,
secretPrepend,
secretAppend,
(pubKey, hasher) => DeriveSecretAgreement(pubKey, hasher));
}
public override byte[] DeriveKeyTls(ECDiffieHellmanPublicKey otherPartyPublicKey!!, byte[] prfLabel!!, byte[] prfSeed!!)
{
ThrowIfDisposed();
return ECDiffieHellmanDerivation.DeriveKeyTls(
otherPartyPublicKey,
prfLabel,
prfSeed,
(pubKey, hasher) => DeriveSecretAgreement(pubKey, hasher));
}
private byte[]? DeriveSecretAgreement(ECDiffieHellmanPublicKey otherPartyPublicKey, IncrementalHash? hasher)
{
if (!(otherPartyPublicKey is ECDiffieHellmanSecurityTransformsPublicKey secTransPubKey))
{
secTransPubKey =
new ECDiffieHellmanSecurityTransformsPublicKey(otherPartyPublicKey.ExportParameters());
}
try
{
SafeSecKeyRefHandle otherPublic = secTransPubKey.KeyHandle;
if (Interop.AppleCrypto.EccGetKeySizeInBits(otherPublic) != KeySize)
{
throw new ArgumentException(
SR.Cryptography_ArgECDHKeySizeMismatch,
nameof(otherPartyPublicKey));
}
SafeSecKeyRefHandle? thisPrivate = GetKeys().PrivateKey;
if (thisPrivate == null)
{
throw new CryptographicException(SR.Cryptography_CSP_NoPrivateKey);
}
// Since Apple only supports secp256r1, secp384r1, and secp521r1; and 521 fits in
// 66 bytes ((521 + 7) / 8), the Span path will always succeed.
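                    // Worked sizes for the supported curves (field bits -> shared-secret bytes, rounded up):
                    //   secp256r1: (256 + 7) / 8 = 32
                    //   secp384r1: (384 + 7) / 8 = 48
                    //   secp521r1: (521 + 7) / 8 = 66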
Span<byte> secretSpan = stackalloc byte[66];
byte[]? secret = Interop.AppleCrypto.EcdhKeyAgree(
thisPrivate,
otherPublic,
secretSpan,
out int bytesWritten);
// Either we wrote to the span or we returned an array, but not both, and not neither.
// ("neither" would have thrown)
Debug.Assert(
(bytesWritten == 0) != (secret == null),
$"bytesWritten={bytesWritten}, (secret==null)={secret == null}");
if (hasher == null)
{
return secret ?? secretSpan.Slice(0, bytesWritten).ToArray();
}
if (secret == null)
{
hasher.AppendData(secretSpan.Slice(0, bytesWritten));
}
else
{
hasher.AppendData(secret);
Array.Clear(secret);
}
return null;
}
finally
{
if (!ReferenceEquals(otherPartyPublicKey, secTransPubKey))
{
secTransPubKey.Dispose();
}
}
}
public override ECDiffieHellmanPublicKey PublicKey =>
new ECDiffieHellmanSecurityTransformsPublicKey(ExportParameters(false));
private sealed class ECDiffieHellmanSecurityTransformsPublicKey : ECDiffieHellmanPublicKey
{
private EccSecurityTransforms _ecc;
public ECDiffieHellmanSecurityTransformsPublicKey(ECParameters ecParameters)
{
Debug.Assert(ecParameters.D == null);
_ecc = new EccSecurityTransforms(nameof(ECDiffieHellmanPublicKey));
_ecc.ImportParameters(ecParameters);
}
public override string ToXmlString()
{
throw new PlatformNotSupportedException();
}
/// <summary>
/// There is no key blob format for OpenSSL ECDH like there is for Cng ECDH. Instead of allowing
/// this to return a potentially confusing empty byte array, we opt to throw instead.
/// </summary>
public override byte[] ToByteArray()
{
throw new PlatformNotSupportedException();
}
protected override void Dispose(bool disposing)
{
if (disposing)
{
_ecc.Dispose();
}
base.Dispose(disposing);
}
public override ECParameters ExportExplicitParameters() =>
throw new PlatformNotSupportedException(SR.Cryptography_ECC_NamedCurvesOnly);
public override ECParameters ExportParameters() =>
_ecc.ExportParameters(includePrivateParameters: false, keySizeInBits: -1);
internal SafeSecKeyRefHandle KeyHandle =>
_ecc.GetOrGenerateKeys(-1).PublicKey;
}
}
}
}
|
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types but in the code base, we were sometimes relying on `featureSIMD` flag which can be `false` in case `COMPlus_FeatureSIMD=0` and doesn't work on Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for your the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
|
./src/native/libs/Common/pal_error_common.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_compiler.h"
#include "pal_types.h"
#include "pal_config.h"
#include "pal_utilities.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <netdb.h>
/**
* Error codes returned via ConvertErrno.
*
* Only the names (without the PAL_ prefix) are specified by POSIX.
*
 * The values chosen below are simply assigned arbitrarily (originally in the
 * alphabetical order in which they appear in the spec); they can't change, so
 * add new values to the end!
*
* Also, the values chosen are deliberately outside the range of
* typical UNIX errnos (small numbers), HRESULTs (negative for errors)
* and Win32 errors (0x0000 - 0xFFFF). This isn't required for
* correctness, but may help debug a caller that is interpreting a raw
* int incorrectly.
*
* Wherever the spec says "x may be the same value as y", we do use
 * the same value so that callers cannot take a dependency on
* being able to distinguish between them.
*/
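/*
 * For example, Error_EACCES below is 0x10002 (65538): larger than typical errno
 * values (EACCES is commonly 13), above the Win32 error range (0x0000 - 0xFFFF),
 * and non-negative, unlike failure HRESULTs.
 */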
typedef enum
{
Error_SUCCESS = 0,
Error_E2BIG = 0x10001, // Argument list too long.
Error_EACCES = 0x10002, // Permission denied.
Error_EADDRINUSE = 0x10003, // Address in use.
Error_EADDRNOTAVAIL = 0x10004, // Address not available.
Error_EAFNOSUPPORT = 0x10005, // Address family not supported.
Error_EAGAIN = 0x10006, // Resource unavailable, try again (same value as EWOULDBLOCK),
Error_EALREADY = 0x10007, // Connection already in progress.
Error_EBADF = 0x10008, // Bad file descriptor.
Error_EBADMSG = 0x10009, // Bad message.
Error_EBUSY = 0x1000A, // Device or resource busy.
Error_ECANCELED = 0x1000B, // Operation canceled.
Error_ECHILD = 0x1000C, // No child processes.
Error_ECONNABORTED = 0x1000D, // Connection aborted.
Error_ECONNREFUSED = 0x1000E, // Connection refused.
Error_ECONNRESET = 0x1000F, // Connection reset.
Error_EDEADLK = 0x10010, // Resource deadlock would occur.
Error_EDESTADDRREQ = 0x10011, // Destination address required.
Error_EDOM = 0x10012, // Mathematics argument out of domain of function.
Error_EDQUOT = 0x10013, // Reserved.
Error_EEXIST = 0x10014, // File exists.
Error_EFAULT = 0x10015, // Bad address.
Error_EFBIG = 0x10016, // File too large.
Error_EHOSTUNREACH = 0x10017, // Host is unreachable.
Error_EIDRM = 0x10018, // Identifier removed.
Error_EILSEQ = 0x10019, // Illegal byte sequence.
Error_EINPROGRESS = 0x1001A, // Operation in progress.
Error_EINTR = 0x1001B, // Interrupted function.
Error_EINVAL = 0x1001C, // Invalid argument.
Error_EIO = 0x1001D, // I/O error.
Error_EISCONN = 0x1001E, // Socket is connected.
Error_EISDIR = 0x1001F, // Is a directory.
Error_ELOOP = 0x10020, // Too many levels of symbolic links.
Error_EMFILE = 0x10021, // File descriptor value too large.
Error_EMLINK = 0x10022, // Too many links.
Error_EMSGSIZE = 0x10023, // Message too large.
Error_EMULTIHOP = 0x10024, // Reserved.
Error_ENAMETOOLONG = 0x10025, // Filename too long.
Error_ENETDOWN = 0x10026, // Network is down.
Error_ENETRESET = 0x10027, // Connection aborted by network.
Error_ENETUNREACH = 0x10028, // Network unreachable.
Error_ENFILE = 0x10029, // Too many files open in system.
Error_ENOBUFS = 0x1002A, // No buffer space available.
Error_ENODEV = 0x1002C, // No such device.
Error_ENOENT = 0x1002D, // No such file or directory.
Error_ENOEXEC = 0x1002E, // Executable file format error.
Error_ENOLCK = 0x1002F, // No locks available.
Error_ENOLINK = 0x10030, // Reserved.
Error_ENOMEM = 0x10031, // Not enough space.
Error_ENOMSG = 0x10032, // No message of the desired type.
Error_ENOPROTOOPT = 0x10033, // Protocol not available.
Error_ENOSPC = 0x10034, // No space left on device.
Error_ENOSYS = 0x10037, // Function not supported.
Error_ENOTCONN = 0x10038, // The socket is not connected.
Error_ENOTDIR = 0x10039, // Not a directory or a symbolic link to a directory.
Error_ENOTEMPTY = 0x1003A, // Directory not empty.
Error_ENOTRECOVERABLE = 0x1003B, // State not recoverable.
Error_ENOTSOCK = 0x1003C, // Not a socket.
    Error_ENOTSUP = 0x1003D, // Not supported (same value as EOPNOTSUPP).
Error_ENOTTY = 0x1003E, // Inappropriate I/O control operation.
Error_ENXIO = 0x1003F, // No such device or address.
Error_EOVERFLOW = 0x10040, // Value too large to be stored in data type.
Error_EOWNERDEAD = 0x10041, // Previous owner died.
Error_EPERM = 0x10042, // Operation not permitted.
Error_EPIPE = 0x10043, // Broken pipe.
Error_EPROTO = 0x10044, // Protocol error.
Error_EPROTONOSUPPORT = 0x10045, // Protocol not supported.
Error_EPROTOTYPE = 0x10046, // Protocol wrong type for socket.
Error_ERANGE = 0x10047, // Result too large.
Error_EROFS = 0x10048, // Read-only file system.
Error_ESPIPE = 0x10049, // Invalid seek.
Error_ESRCH = 0x1004A, // No such process.
Error_ESTALE = 0x1004B, // Reserved.
Error_ETIMEDOUT = 0x1004D, // Connection timed out.
Error_ETXTBSY = 0x1004E, // Text file busy.
Error_EXDEV = 0x1004F, // Cross-device link.
Error_ESOCKTNOSUPPORT = 0x1005E, // Socket type not supported.
Error_EPFNOSUPPORT = 0x10060, // Protocol family not supported.
Error_ESHUTDOWN = 0x1006C, // Socket shutdown.
Error_EHOSTDOWN = 0x10070, // Host is down.
Error_ENODATA = 0x10071, // No data available.
// Error codes to track errors beyond kernel.
Error_EHOSTNOTFOUND = 0x20001, // Name lookup failed.
Error_ESOCKETERROR = 0x20002, // Unidentified socket error.
// POSIX permits these to have the same value and we make them
// always equal so that we cannot introduce a dependency on
// distinguishing between them that would not work on all
// platforms.
Error_EOPNOTSUPP = Error_ENOTSUP, // Operation not supported on socket
Error_EWOULDBLOCK = Error_EAGAIN, // Operation would block
// This one is not part of POSIX, but is a catch-all for the case
// where we cannot convert the raw errno value to something above.
Error_ENONSTANDARD = 0x1FFFF,
} Error;
/*
Some pal errors don't have a corresponding errno value.
We define values for these errors.
We want these values to be distinct from real errno values.
We base them on the Error enum values, which are chosen to be outside the
typical errno range, and make them negative because POSIX
requires errno values to be positive.
*/
#define EHOSTNOTFOUND (-Error_EHOSTNOTFOUND)
#define ESOCKETERROR (-Error_ESOCKETERROR)
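/*
For example, EHOSTNOTFOUND expands to -(0x20001) == -131073 and ESOCKETERROR to
-(0x20002) == -131074, which can never collide with a positive errno value.
*/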
inline static int32_t ConvertErrorPlatformToPal(int32_t platformErrno)
{
switch (platformErrno)
{
case 0:
return Error_SUCCESS;
case E2BIG:
return Error_E2BIG;
case EACCES:
return Error_EACCES;
case EADDRINUSE:
return Error_EADDRINUSE;
case EADDRNOTAVAIL:
return Error_EADDRNOTAVAIL;
case EAFNOSUPPORT:
return Error_EAFNOSUPPORT;
case EAGAIN:
return Error_EAGAIN;
case EALREADY:
return Error_EALREADY;
case EBADF:
return Error_EBADF;
case EBADMSG:
return Error_EBADMSG;
case EBUSY:
return Error_EBUSY;
case ECANCELED:
return Error_ECANCELED;
case ECHILD:
return Error_ECHILD;
case ECONNABORTED:
return Error_ECONNABORTED;
case ECONNREFUSED:
return Error_ECONNREFUSED;
case ECONNRESET:
return Error_ECONNRESET;
case EDEADLK:
return Error_EDEADLK;
case EDESTADDRREQ:
return Error_EDESTADDRREQ;
case EDOM:
return Error_EDOM;
case EDQUOT:
return Error_EDQUOT;
case EEXIST:
return Error_EEXIST;
case EFAULT:
return Error_EFAULT;
case EFBIG:
return Error_EFBIG;
case EHOSTUNREACH:
return Error_EHOSTUNREACH;
case EIDRM:
return Error_EIDRM;
case EILSEQ:
return Error_EILSEQ;
case EINPROGRESS:
return Error_EINPROGRESS;
case EINTR:
return Error_EINTR;
case EINVAL:
return Error_EINVAL;
case EIO:
return Error_EIO;
case EISCONN:
return Error_EISCONN;
case EISDIR:
return Error_EISDIR;
case ELOOP:
return Error_ELOOP;
case EMFILE:
return Error_EMFILE;
case EMLINK:
return Error_EMLINK;
case EMSGSIZE:
return Error_EMSGSIZE;
case EMULTIHOP:
return Error_EMULTIHOP;
case ENAMETOOLONG:
return Error_ENAMETOOLONG;
case ENETDOWN:
return Error_ENETDOWN;
case ENETRESET:
return Error_ENETRESET;
case ENETUNREACH:
return Error_ENETUNREACH;
case ENFILE:
return Error_ENFILE;
case ENOBUFS:
return Error_ENOBUFS;
case ENODEV:
return Error_ENODEV;
case ENOENT:
return Error_ENOENT;
case ENOEXEC:
return Error_ENOEXEC;
case ENOLCK:
return Error_ENOLCK;
case ENOLINK:
return Error_ENOLINK;
case ENOMEM:
return Error_ENOMEM;
case ENOMSG:
return Error_ENOMSG;
case ENOPROTOOPT:
return Error_ENOPROTOOPT;
case ENOSPC:
return Error_ENOSPC;
case ENOSYS:
return Error_ENOSYS;
case ENOTCONN:
return Error_ENOTCONN;
case ENOTDIR:
return Error_ENOTDIR;
#if ENOTEMPTY != EEXIST // AIX can define ENOTEMPTY to the same value as EEXIST
case ENOTEMPTY:
return Error_ENOTEMPTY;
#endif
#ifdef ENOTRECOVERABLE // not available in NetBSD
case ENOTRECOVERABLE:
return Error_ENOTRECOVERABLE;
#endif
case ENOTSOCK:
return Error_ENOTSOCK;
case ENOTSUP:
return Error_ENOTSUP;
case ENOTTY:
return Error_ENOTTY;
case ENXIO:
return Error_ENXIO;
case EOVERFLOW:
return Error_EOVERFLOW;
#ifdef EOWNERDEAD // not available in NetBSD
case EOWNERDEAD:
return Error_EOWNERDEAD;
#endif
case EPERM:
return Error_EPERM;
case EPIPE:
return Error_EPIPE;
case EPROTO:
return Error_EPROTO;
case EPROTONOSUPPORT:
return Error_EPROTONOSUPPORT;
case EPROTOTYPE:
return Error_EPROTOTYPE;
case ERANGE:
return Error_ERANGE;
case EROFS:
return Error_EROFS;
case ESPIPE:
return Error_ESPIPE;
case ESRCH:
return Error_ESRCH;
case ESTALE:
return Error_ESTALE;
case ETIMEDOUT:
return Error_ETIMEDOUT;
case ETXTBSY:
return Error_ETXTBSY;
case EXDEV:
return Error_EXDEV;
#ifdef ESOCKTNOSUPPORT
case ESOCKTNOSUPPORT:
return Error_ESOCKTNOSUPPORT;
#endif
case EPFNOSUPPORT:
return Error_EPFNOSUPPORT;
case ESHUTDOWN:
return Error_ESHUTDOWN;
case EHOSTDOWN:
return Error_EHOSTDOWN;
#ifdef ENODATA // not available in FreeBSD
case ENODATA:
return Error_ENODATA;
#endif
// #if because these will trigger duplicate case label warnings when
// they have the same value, which is permitted by POSIX and common.
#if EOPNOTSUPP != ENOTSUP
case EOPNOTSUPP:
return Error_EOPNOTSUPP;
#endif
#if EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
return Error_EWOULDBLOCK;
#endif
}
return Error_ENONSTANDARD;
}
inline static int32_t ConvertErrorPalToPlatform(int32_t error)
{
switch (error)
{
case Error_SUCCESS:
return 0;
case Error_E2BIG:
return E2BIG;
case Error_EACCES:
return EACCES;
case Error_EADDRINUSE:
return EADDRINUSE;
case Error_EADDRNOTAVAIL:
return EADDRNOTAVAIL;
case Error_EAFNOSUPPORT:
return EAFNOSUPPORT;
case Error_EAGAIN:
return EAGAIN;
case Error_EALREADY:
return EALREADY;
case Error_EBADF:
return EBADF;
case Error_EBADMSG:
return EBADMSG;
case Error_EBUSY:
return EBUSY;
case Error_ECANCELED:
return ECANCELED;
case Error_ECHILD:
return ECHILD;
case Error_ECONNABORTED:
return ECONNABORTED;
case Error_ECONNREFUSED:
return ECONNREFUSED;
case Error_ECONNRESET:
return ECONNRESET;
case Error_EDEADLK:
return EDEADLK;
case Error_EDESTADDRREQ:
return EDESTADDRREQ;
case Error_EDOM:
return EDOM;
case Error_EDQUOT:
return EDQUOT;
case Error_EEXIST:
return EEXIST;
case Error_EFAULT:
return EFAULT;
case Error_EFBIG:
return EFBIG;
case Error_EHOSTUNREACH:
return EHOSTUNREACH;
case Error_EIDRM:
return EIDRM;
case Error_EILSEQ:
return EILSEQ;
case Error_EINPROGRESS:
return EINPROGRESS;
case Error_EINTR:
return EINTR;
case Error_EINVAL:
return EINVAL;
case Error_EIO:
return EIO;
case Error_EISCONN:
return EISCONN;
case Error_EISDIR:
return EISDIR;
case Error_ELOOP:
return ELOOP;
case Error_EMFILE:
return EMFILE;
case Error_EMLINK:
return EMLINK;
case Error_EMSGSIZE:
return EMSGSIZE;
case Error_EMULTIHOP:
return EMULTIHOP;
case Error_ENAMETOOLONG:
return ENAMETOOLONG;
case Error_ENETDOWN:
return ENETDOWN;
case Error_ENETRESET:
return ENETRESET;
case Error_ENETUNREACH:
return ENETUNREACH;
case Error_ENFILE:
return ENFILE;
case Error_ENOBUFS:
return ENOBUFS;
case Error_ENODEV:
return ENODEV;
case Error_ENOENT:
return ENOENT;
case Error_ENOEXEC:
return ENOEXEC;
case Error_ENOLCK:
return ENOLCK;
case Error_ENOLINK:
return ENOLINK;
case Error_ENOMEM:
return ENOMEM;
case Error_ENOMSG:
return ENOMSG;
case Error_ENOPROTOOPT:
return ENOPROTOOPT;
case Error_ENOSPC:
return ENOSPC;
case Error_ENOSYS:
return ENOSYS;
case Error_ENOTCONN:
return ENOTCONN;
case Error_ENOTDIR:
return ENOTDIR;
case Error_ENOTEMPTY:
return ENOTEMPTY;
#ifdef ENOTRECOVERABLE // not available in NetBSD
case Error_ENOTRECOVERABLE:
return ENOTRECOVERABLE;
#endif
case Error_ENOTSOCK:
return ENOTSOCK;
case Error_ENOTSUP:
return ENOTSUP;
case Error_ENOTTY:
return ENOTTY;
case Error_ENXIO:
return ENXIO;
case Error_EOVERFLOW:
return EOVERFLOW;
#ifdef EOWNERDEAD // not available in NetBSD
case Error_EOWNERDEAD:
return EOWNERDEAD;
#endif
case Error_EPERM:
return EPERM;
case Error_EPIPE:
return EPIPE;
case Error_EPROTO:
return EPROTO;
case Error_EPROTONOSUPPORT:
return EPROTONOSUPPORT;
case Error_EPROTOTYPE:
return EPROTOTYPE;
case Error_ERANGE:
return ERANGE;
case Error_EROFS:
return EROFS;
case Error_ESPIPE:
return ESPIPE;
case Error_ESRCH:
return ESRCH;
case Error_ESTALE:
return ESTALE;
case Error_ETIMEDOUT:
return ETIMEDOUT;
case Error_ETXTBSY:
return ETXTBSY;
case Error_EXDEV:
return EXDEV;
case Error_EPFNOSUPPORT:
return EPFNOSUPPORT;
#ifdef ESOCKTNOSUPPORT
case Error_ESOCKTNOSUPPORT:
return ESOCKTNOSUPPORT;
#endif
case Error_ESHUTDOWN:
return ESHUTDOWN;
case Error_EHOSTDOWN:
return EHOSTDOWN;
#ifdef ENODATA // not available in FreeBSD
case Error_ENODATA:
return ENODATA;
#endif
case Error_EHOSTNOTFOUND:
return EHOSTNOTFOUND;
case Error_ESOCKETERROR:
return ESOCKETERROR;
case Error_ENONSTANDARD:
break; // fall through to assert
}
// We should not use this function to round-trip platform -> pal
// -> platform. It's here only to synthesize a platform number
// from the fixed set above. Note that the assert is outside the
// switch rather than in a default case block because not
// having a default will trigger a warning (as error) if there's
// an enum value we haven't handled. Should that trigger, make
// note that there is probably a corresponding missing case in the
// other direction above, but the compiler can't warn in that case
// because the platform values are not part of an enum.
assert_err(false, "Unknown error code", (int) error);
return -1;
}
static bool TryConvertErrorToGai(int32_t error, int32_t* gaiError)
{
assert(gaiError != NULL);
switch (error)
{
case EHOSTNOTFOUND:
*gaiError = EAI_NONAME;
return true;
default:
*gaiError = error;
return false;
}
}
inline static const char* StrErrorR(int32_t platformErrno, char* buffer, int32_t bufferSize)
{
assert(buffer != NULL);
assert(bufferSize > 0);
if (bufferSize < 0)
return NULL;
if (platformErrno < 0)
{
// Not a system error
int32_t gaiError;
if (TryConvertErrorToGai(platformErrno, &gaiError))
{
SafeStringCopy(buffer, (size_t)bufferSize, gai_strerror(gaiError));
return buffer;
}
else if (platformErrno == ESOCKETERROR)
{
SafeStringCopy(buffer, (size_t)bufferSize, "Unknown socket error");
return buffer;
}
}
// Note that we must use strerror_r because plain strerror is not
// thread-safe.
//
// However, there are two versions of strerror_r:
// - GNU: char* strerror_r(int, char*, size_t);
// - POSIX: int strerror_r(int, char*, size_t);
//
// The former may or may not use the supplied buffer, and returns
// the error message string. The latter stores the error message
// string into the supplied buffer and returns an error code.
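    // With the GNU variant, the returned pointer may refer to an immutable static
    // string and the supplied buffer may be left untouched, so only the return
    // value is reliable; with the POSIX variant, the message (possibly truncated)
    // is written into the buffer.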
#if HAVE_GNU_STRERROR_R
const char* message = strerror_r(platformErrno, buffer, (uint32_t) bufferSize);
assert(message != NULL);
return message;
#else
int error = strerror_r(platformErrno, buffer, (uint32_t) bufferSize);
if (error == ERANGE)
{
// Buffer is too small to hold the entire message, but has
// still been filled to the extent possible and null-terminated.
return NULL;
}
// The only other valid error codes are 0 for success or EINVAL for
// an unknown error, but in the latter case a reasonable string (e.g
// "Unknown error: 0x123") is returned.
assert_err(error == 0 || error == EINVAL, "invalid error", error);
return buffer;
#endif
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#pragma once
#include "pal_compiler.h"
#include "pal_types.h"
#include "pal_config.h"
#include "pal_utilities.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <netdb.h>
/**
* Error codes returned via ConvertErrno.
*
* Only the names (without the PAL_ prefix) are specified by POSIX.
*
 * The values chosen below are simply assigned arbitrarily (originally in the
 * alphabetical order in which they appear in the spec); they can't change, so
 * add new values to the end!
*
* Also, the values chosen are deliberately outside the range of
* typical UNIX errnos (small numbers), HRESULTs (negative for errors)
* and Win32 errors (0x0000 - 0xFFFF). This isn't required for
* correctness, but may help debug a caller that is interpreting a raw
* int incorrectly.
*
* Wherever the spec says "x may be the same value as y", we do use
 * the same value so that callers cannot take a dependency on
* being able to distinguish between them.
*/
typedef enum
{
Error_SUCCESS = 0,
Error_E2BIG = 0x10001, // Argument list too long.
Error_EACCES = 0x10002, // Permission denied.
Error_EADDRINUSE = 0x10003, // Address in use.
Error_EADDRNOTAVAIL = 0x10004, // Address not available.
Error_EAFNOSUPPORT = 0x10005, // Address family not supported.
Error_EAGAIN = 0x10006, // Resource unavailable, try again (same value as EWOULDBLOCK),
Error_EALREADY = 0x10007, // Connection already in progress.
Error_EBADF = 0x10008, // Bad file descriptor.
Error_EBADMSG = 0x10009, // Bad message.
Error_EBUSY = 0x1000A, // Device or resource busy.
Error_ECANCELED = 0x1000B, // Operation canceled.
Error_ECHILD = 0x1000C, // No child processes.
Error_ECONNABORTED = 0x1000D, // Connection aborted.
Error_ECONNREFUSED = 0x1000E, // Connection refused.
Error_ECONNRESET = 0x1000F, // Connection reset.
Error_EDEADLK = 0x10010, // Resource deadlock would occur.
Error_EDESTADDRREQ = 0x10011, // Destination address required.
Error_EDOM = 0x10012, // Mathematics argument out of domain of function.
Error_EDQUOT = 0x10013, // Reserved.
Error_EEXIST = 0x10014, // File exists.
Error_EFAULT = 0x10015, // Bad address.
Error_EFBIG = 0x10016, // File too large.
Error_EHOSTUNREACH = 0x10017, // Host is unreachable.
Error_EIDRM = 0x10018, // Identifier removed.
Error_EILSEQ = 0x10019, // Illegal byte sequence.
Error_EINPROGRESS = 0x1001A, // Operation in progress.
Error_EINTR = 0x1001B, // Interrupted function.
Error_EINVAL = 0x1001C, // Invalid argument.
Error_EIO = 0x1001D, // I/O error.
Error_EISCONN = 0x1001E, // Socket is connected.
Error_EISDIR = 0x1001F, // Is a directory.
Error_ELOOP = 0x10020, // Too many levels of symbolic links.
Error_EMFILE = 0x10021, // File descriptor value too large.
Error_EMLINK = 0x10022, // Too many links.
Error_EMSGSIZE = 0x10023, // Message too large.
Error_EMULTIHOP = 0x10024, // Reserved.
Error_ENAMETOOLONG = 0x10025, // Filename too long.
Error_ENETDOWN = 0x10026, // Network is down.
Error_ENETRESET = 0x10027, // Connection aborted by network.
Error_ENETUNREACH = 0x10028, // Network unreachable.
Error_ENFILE = 0x10029, // Too many files open in system.
Error_ENOBUFS = 0x1002A, // No buffer space available.
Error_ENODEV = 0x1002C, // No such device.
Error_ENOENT = 0x1002D, // No such file or directory.
Error_ENOEXEC = 0x1002E, // Executable file format error.
Error_ENOLCK = 0x1002F, // No locks available.
Error_ENOLINK = 0x10030, // Reserved.
Error_ENOMEM = 0x10031, // Not enough space.
Error_ENOMSG = 0x10032, // No message of the desired type.
Error_ENOPROTOOPT = 0x10033, // Protocol not available.
Error_ENOSPC = 0x10034, // No space left on device.
Error_ENOSYS = 0x10037, // Function not supported.
Error_ENOTCONN = 0x10038, // The socket is not connected.
Error_ENOTDIR = 0x10039, // Not a directory or a symbolic link to a directory.
Error_ENOTEMPTY = 0x1003A, // Directory not empty.
Error_ENOTRECOVERABLE = 0x1003B, // State not recoverable.
Error_ENOTSOCK = 0x1003C, // Not a socket.
    Error_ENOTSUP = 0x1003D, // Not supported (same value as EOPNOTSUPP).
Error_ENOTTY = 0x1003E, // Inappropriate I/O control operation.
Error_ENXIO = 0x1003F, // No such device or address.
Error_EOVERFLOW = 0x10040, // Value too large to be stored in data type.
Error_EOWNERDEAD = 0x10041, // Previous owner died.
Error_EPERM = 0x10042, // Operation not permitted.
Error_EPIPE = 0x10043, // Broken pipe.
Error_EPROTO = 0x10044, // Protocol error.
Error_EPROTONOSUPPORT = 0x10045, // Protocol not supported.
Error_EPROTOTYPE = 0x10046, // Protocol wrong type for socket.
Error_ERANGE = 0x10047, // Result too large.
Error_EROFS = 0x10048, // Read-only file system.
Error_ESPIPE = 0x10049, // Invalid seek.
Error_ESRCH = 0x1004A, // No such process.
Error_ESTALE = 0x1004B, // Reserved.
Error_ETIMEDOUT = 0x1004D, // Connection timed out.
Error_ETXTBSY = 0x1004E, // Text file busy.
Error_EXDEV = 0x1004F, // Cross-device link.
Error_ESOCKTNOSUPPORT = 0x1005E, // Socket type not supported.
Error_EPFNOSUPPORT = 0x10060, // Protocol family not supported.
Error_ESHUTDOWN = 0x1006C, // Socket shutdown.
Error_EHOSTDOWN = 0x10070, // Host is down.
Error_ENODATA = 0x10071, // No data available.
// Error codes to track errors beyond kernel.
Error_EHOSTNOTFOUND = 0x20001, // Name lookup failed.
Error_ESOCKETERROR = 0x20002, // Unidentified socket error.
// POSIX permits these to have the same value and we make them
// always equal so that we cannot introduce a dependency on
// distinguishing between them that would not work on all
// platforms.
Error_EOPNOTSUPP = Error_ENOTSUP, // Operation not supported on socket
Error_EWOULDBLOCK = Error_EAGAIN, // Operation would block
// This one is not part of POSIX, but is a catch-all for the case
// where we cannot convert the raw errno value to something above.
Error_ENONSTANDARD = 0x1FFFF,
} Error;
/*
Some pal errors don't have a corresponding errno value.
We define values for these errors.
We want these values to be distinct from real errno values.
We base them on the Error enum values, which are chosen to be outside the
typical errno range, and make them negative because POSIX
requires errno values to be positive.
*/
#define EHOSTNOTFOUND (-Error_EHOSTNOTFOUND)
#define ESOCKETERROR (-Error_ESOCKETERROR)
inline static int32_t ConvertErrorPlatformToPal(int32_t platformErrno)
{
switch (platformErrno)
{
case 0:
return Error_SUCCESS;
case E2BIG:
return Error_E2BIG;
case EACCES:
return Error_EACCES;
case EADDRINUSE:
return Error_EADDRINUSE;
case EADDRNOTAVAIL:
return Error_EADDRNOTAVAIL;
case EAFNOSUPPORT:
return Error_EAFNOSUPPORT;
case EAGAIN:
return Error_EAGAIN;
case EALREADY:
return Error_EALREADY;
case EBADF:
return Error_EBADF;
case EBADMSG:
return Error_EBADMSG;
case EBUSY:
return Error_EBUSY;
case ECANCELED:
return Error_ECANCELED;
case ECHILD:
return Error_ECHILD;
case ECONNABORTED:
return Error_ECONNABORTED;
case ECONNREFUSED:
return Error_ECONNREFUSED;
case ECONNRESET:
return Error_ECONNRESET;
case EDEADLK:
return Error_EDEADLK;
case EDESTADDRREQ:
return Error_EDESTADDRREQ;
case EDOM:
return Error_EDOM;
case EDQUOT:
return Error_EDQUOT;
case EEXIST:
return Error_EEXIST;
case EFAULT:
return Error_EFAULT;
case EFBIG:
return Error_EFBIG;
case EHOSTUNREACH:
return Error_EHOSTUNREACH;
case EIDRM:
return Error_EIDRM;
case EILSEQ:
return Error_EILSEQ;
case EINPROGRESS:
return Error_EINPROGRESS;
case EINTR:
return Error_EINTR;
case EINVAL:
return Error_EINVAL;
case EIO:
return Error_EIO;
case EISCONN:
return Error_EISCONN;
case EISDIR:
return Error_EISDIR;
case ELOOP:
return Error_ELOOP;
case EMFILE:
return Error_EMFILE;
case EMLINK:
return Error_EMLINK;
case EMSGSIZE:
return Error_EMSGSIZE;
case EMULTIHOP:
return Error_EMULTIHOP;
case ENAMETOOLONG:
return Error_ENAMETOOLONG;
case ENETDOWN:
return Error_ENETDOWN;
case ENETRESET:
return Error_ENETRESET;
case ENETUNREACH:
return Error_ENETUNREACH;
case ENFILE:
return Error_ENFILE;
case ENOBUFS:
return Error_ENOBUFS;
case ENODEV:
return Error_ENODEV;
case ENOENT:
return Error_ENOENT;
case ENOEXEC:
return Error_ENOEXEC;
case ENOLCK:
return Error_ENOLCK;
case ENOLINK:
return Error_ENOLINK;
case ENOMEM:
return Error_ENOMEM;
case ENOMSG:
return Error_ENOMSG;
case ENOPROTOOPT:
return Error_ENOPROTOOPT;
case ENOSPC:
return Error_ENOSPC;
case ENOSYS:
return Error_ENOSYS;
case ENOTCONN:
return Error_ENOTCONN;
case ENOTDIR:
return Error_ENOTDIR;
#if ENOTEMPTY != EEXIST // AIX can define ENOTEMPTY to the same value as EEXIST
case ENOTEMPTY:
return Error_ENOTEMPTY;
#endif
#ifdef ENOTRECOVERABLE // not available in NetBSD
case ENOTRECOVERABLE:
return Error_ENOTRECOVERABLE;
#endif
case ENOTSOCK:
return Error_ENOTSOCK;
case ENOTSUP:
return Error_ENOTSUP;
case ENOTTY:
return Error_ENOTTY;
case ENXIO:
return Error_ENXIO;
case EOVERFLOW:
return Error_EOVERFLOW;
#ifdef EOWNERDEAD // not available in NetBSD
case EOWNERDEAD:
return Error_EOWNERDEAD;
#endif
case EPERM:
return Error_EPERM;
case EPIPE:
return Error_EPIPE;
case EPROTO:
return Error_EPROTO;
case EPROTONOSUPPORT:
return Error_EPROTONOSUPPORT;
case EPROTOTYPE:
return Error_EPROTOTYPE;
case ERANGE:
return Error_ERANGE;
case EROFS:
return Error_EROFS;
case ESPIPE:
return Error_ESPIPE;
case ESRCH:
return Error_ESRCH;
case ESTALE:
return Error_ESTALE;
case ETIMEDOUT:
return Error_ETIMEDOUT;
case ETXTBSY:
return Error_ETXTBSY;
case EXDEV:
return Error_EXDEV;
#ifdef ESOCKTNOSUPPORT
case ESOCKTNOSUPPORT:
return Error_ESOCKTNOSUPPORT;
#endif
case EPFNOSUPPORT:
return Error_EPFNOSUPPORT;
case ESHUTDOWN:
return Error_ESHUTDOWN;
case EHOSTDOWN:
return Error_EHOSTDOWN;
#ifdef ENODATA // not available in FreeBSD
case ENODATA:
return Error_ENODATA;
#endif
// #if because these will trigger duplicate case label warnings when
// they have the same value, which is permitted by POSIX and common.
#if EOPNOTSUPP != ENOTSUP
case EOPNOTSUPP:
return Error_EOPNOTSUPP;
#endif
#if EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
return Error_EWOULDBLOCK;
#endif
}
return Error_ENONSTANDARD;
}
inline static int32_t ConvertErrorPalToPlatform(int32_t error)
{
switch (error)
{
case Error_SUCCESS:
return 0;
case Error_E2BIG:
return E2BIG;
case Error_EACCES:
return EACCES;
case Error_EADDRINUSE:
return EADDRINUSE;
case Error_EADDRNOTAVAIL:
return EADDRNOTAVAIL;
case Error_EAFNOSUPPORT:
return EAFNOSUPPORT;
case Error_EAGAIN:
return EAGAIN;
case Error_EALREADY:
return EALREADY;
case Error_EBADF:
return EBADF;
case Error_EBADMSG:
return EBADMSG;
case Error_EBUSY:
return EBUSY;
case Error_ECANCELED:
return ECANCELED;
case Error_ECHILD:
return ECHILD;
case Error_ECONNABORTED:
return ECONNABORTED;
case Error_ECONNREFUSED:
return ECONNREFUSED;
case Error_ECONNRESET:
return ECONNRESET;
case Error_EDEADLK:
return EDEADLK;
case Error_EDESTADDRREQ:
return EDESTADDRREQ;
case Error_EDOM:
return EDOM;
case Error_EDQUOT:
return EDQUOT;
case Error_EEXIST:
return EEXIST;
case Error_EFAULT:
return EFAULT;
case Error_EFBIG:
return EFBIG;
case Error_EHOSTUNREACH:
return EHOSTUNREACH;
case Error_EIDRM:
return EIDRM;
case Error_EILSEQ:
return EILSEQ;
case Error_EINPROGRESS:
return EINPROGRESS;
case Error_EINTR:
return EINTR;
case Error_EINVAL:
return EINVAL;
case Error_EIO:
return EIO;
case Error_EISCONN:
return EISCONN;
case Error_EISDIR:
return EISDIR;
case Error_ELOOP:
return ELOOP;
case Error_EMFILE:
return EMFILE;
case Error_EMLINK:
return EMLINK;
case Error_EMSGSIZE:
return EMSGSIZE;
case Error_EMULTIHOP:
return EMULTIHOP;
case Error_ENAMETOOLONG:
return ENAMETOOLONG;
case Error_ENETDOWN:
return ENETDOWN;
case Error_ENETRESET:
return ENETRESET;
case Error_ENETUNREACH:
return ENETUNREACH;
case Error_ENFILE:
return ENFILE;
case Error_ENOBUFS:
return ENOBUFS;
case Error_ENODEV:
return ENODEV;
case Error_ENOENT:
return ENOENT;
case Error_ENOEXEC:
return ENOEXEC;
case Error_ENOLCK:
return ENOLCK;
case Error_ENOLINK:
return ENOLINK;
case Error_ENOMEM:
return ENOMEM;
case Error_ENOMSG:
return ENOMSG;
case Error_ENOPROTOOPT:
return ENOPROTOOPT;
case Error_ENOSPC:
return ENOSPC;
case Error_ENOSYS:
return ENOSYS;
case Error_ENOTCONN:
return ENOTCONN;
case Error_ENOTDIR:
return ENOTDIR;
case Error_ENOTEMPTY:
return ENOTEMPTY;
#ifdef ENOTRECOVERABLE // not available in NetBSD
case Error_ENOTRECOVERABLE:
return ENOTRECOVERABLE;
#endif
case Error_ENOTSOCK:
return ENOTSOCK;
case Error_ENOTSUP:
return ENOTSUP;
case Error_ENOTTY:
return ENOTTY;
case Error_ENXIO:
return ENXIO;
case Error_EOVERFLOW:
return EOVERFLOW;
#ifdef EOWNERDEAD // not available in NetBSD
case Error_EOWNERDEAD:
return EOWNERDEAD;
#endif
case Error_EPERM:
return EPERM;
case Error_EPIPE:
return EPIPE;
case Error_EPROTO:
return EPROTO;
case Error_EPROTONOSUPPORT:
return EPROTONOSUPPORT;
case Error_EPROTOTYPE:
return EPROTOTYPE;
case Error_ERANGE:
return ERANGE;
case Error_EROFS:
return EROFS;
case Error_ESPIPE:
return ESPIPE;
case Error_ESRCH:
return ESRCH;
case Error_ESTALE:
return ESTALE;
case Error_ETIMEDOUT:
return ETIMEDOUT;
case Error_ETXTBSY:
return ETXTBSY;
case Error_EXDEV:
return EXDEV;
case Error_EPFNOSUPPORT:
return EPFNOSUPPORT;
#ifdef ESOCKTNOSUPPORT
case Error_ESOCKTNOSUPPORT:
return ESOCKTNOSUPPORT;
#endif
case Error_ESHUTDOWN:
return ESHUTDOWN;
case Error_EHOSTDOWN:
return EHOSTDOWN;
#ifdef ENODATA // not available in FreeBSD
case Error_ENODATA:
return ENODATA;
#endif
case Error_EHOSTNOTFOUND:
return EHOSTNOTFOUND;
case Error_ESOCKETERROR:
return ESOCKETERROR;
case Error_ENONSTANDARD:
break; // fall through to assert
}
// We should not use this function to round-trip platform -> pal
// -> platform. It's here only to synthesize a platform number
// from the fixed set above. Note that the assert is outside the
// switch rather than in a default case block because not
// having a default will trigger a warning (as error) if there's
// an enum value we haven't handled. Should that trigger, make
// note that there is probably a corresponding missing case in the
// other direction above, but the compiler can't warn in that case
// because the platform values are not part of an enum.
assert_err(false, "Unknown error code", (int) error);
return -1;
}
static bool TryConvertErrorToGai(int32_t error, int32_t* gaiError)
{
assert(gaiError != NULL);
switch (error)
{
case EHOSTNOTFOUND:
*gaiError = EAI_NONAME;
return true;
default:
*gaiError = error;
return false;
}
}
inline static const char* StrErrorR(int32_t platformErrno, char* buffer, int32_t bufferSize)
{
assert(buffer != NULL);
assert(bufferSize > 0);
if (bufferSize < 0)
return NULL;
if (platformErrno < 0)
{
// Not a system error
int32_t gaiError;
if (TryConvertErrorToGai(platformErrno, &gaiError))
{
SafeStringCopy(buffer, (size_t)bufferSize, gai_strerror(gaiError));
return buffer;
}
else if (platformErrno == ESOCKETERROR)
{
SafeStringCopy(buffer, (size_t)bufferSize, "Unknown socket error");
return buffer;
}
}
// Note that we must use strerror_r because plain strerror is not
// thread-safe.
//
// However, there are two versions of strerror_r:
// - GNU: char* strerror_r(int, char*, size_t);
// - POSIX: int strerror_r(int, char*, size_t);
//
// The former may or may not use the supplied buffer, and returns
// the error message string. The latter stores the error message
// string into the supplied buffer and returns an error code.
#if HAVE_GNU_STRERROR_R
const char* message = strerror_r(platformErrno, buffer, (uint32_t) bufferSize);
assert(message != NULL);
return message;
#else
int error = strerror_r(platformErrno, buffer, (uint32_t) bufferSize);
if (error == ERANGE)
{
// Buffer is too small to hold the entire message, but has
// still been filled to the extent possible and null-terminated.
return NULL;
}
// The only other valid error codes are 0 for success or EINVAL for
// an unknown error, but in the latter case a reasonable string (e.g.
// "Unknown error: 0x123") is returned.
assert_err(error == 0 || error == EINVAL, "invalid error", error);
return buffer;
#endif
}
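/* Illustrative usage sketch, not from the original header: shows how a caller
   might combine ConvertErrorPlatformToPal and StrErrorR after a failed syscall.
   The includes and the ReportOpenFailure helper are assumptions made for this
   example only. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void ReportOpenFailure(const char* path)
{
    int fd = open(path, O_RDONLY);
    if (fd < 0)
    {
        int savedErrno = errno;
        /* Stable PAL value suitable for returning across the interop boundary. */
        int32_t palError = ConvertErrorPlatformToPal(savedErrno);
        /* StrErrorR may return NULL (POSIX strerror_r with a too-small buffer);
           in that case the buffer still holds a truncated, null-terminated message. */
        char buf[256];
        const char* msg = StrErrorR(savedErrno, buf, (int32_t)sizeof(buf));
        fprintf(stderr, "open('%s') failed: pal=%d (%s)\n",
                path, (int)palError, msg != NULL ? msg : buf);
    }
    else
    {
        close(fd);
    }
}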
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
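For illustration only, the shape of the change is roughly the following (a hypothetical stand-in type, not the real JIT `Compiler` class):

    // Hypothetical stand-in for the pattern described above.
    struct CompilerLike
    {
        bool featureSIMD = false;     // follows COMPlus_FeatureSIMD, can be false
        bool supportSIMDTypes() const
        {
    #ifdef TARGET_ARM64
            return true;              // Arm64 always needs SIMD types for ABI handling
    #else
            return featureSIMD;
    #endif
        }
    };

    // Old pattern: if (comp.featureSIMD) { ... }        // skipped when the flag is off
    // New pattern: if (comp.supportSIMDTypes()) { ... }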
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/pal/src/libunwind/src/ppc/Gis_signal_frame.c
|
/* libunwind - a platform-independent unwind library
Copyright (C) 2006-2007 IBM
Contributed by
Corey Ashford <[email protected]>
Jose Flavio Aguilar Paulino <[email protected]> <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include <libunwind_i.h>
int
unw_is_signal_frame (unw_cursor_t * cursor)
{
struct cursor *c = (struct cursor *) cursor;
unw_word_t w0, w1, i0, i1, i2, ip;
unw_addr_space_t as;
unw_accessors_t *a;
void *arg;
int ret;
as = c->dwarf.as;
as->validate = 1; /* Don't trust the ip */
arg = c->dwarf.as_arg;
/* Check if return address points at sigreturn sequence.
on ppc64 Linux that is (see libc.so):
0x38210080 addi r1, r1, 128 // pop the stack
0x380000ac li r0, 172 // invoke system service 172
0x44000002 sc
*/
ip = c->dwarf.ip;
if (ip == 0)
return 0;
/* Read two 8-byte words at the IP. We are only looking at 3
consecutive 32-bit words, so the second 8-byte word needs to be
shifted right by 32 bits (think big-endian) */
a = unw_get_accessors_int (as);
if ((ret = (*a->access_mem) (as, ip, &w0, 0, arg)) < 0
|| (ret = (*a->access_mem) (as, ip + 8, &w1, 0, arg)) < 0)
return 0;
if (tdep_big_endian (as))
{
i0 = w0 >> 32;
i1 = w0 & 0xffffffffUL;
i2 = w1 >> 32;
}
else
{
i0 = w0 & 0xffffffffUL;
i1 = w0 >> 32;
i2 = w1 & 0xffffffffUL;
}
return (i0 == 0x38210080 && i1 == 0x380000ac && i2 == 0x44000002);
}
|
/* libunwind - a platform-independent unwind library
Copyright (C) 2006-2007 IBM
Contributed by
Corey Ashford <[email protected]>
Jose Flavio Aguilar Paulino <[email protected]> <[email protected]>
This file is part of libunwind.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#include <libunwind_i.h>
int
unw_is_signal_frame (unw_cursor_t * cursor)
{
struct cursor *c = (struct cursor *) cursor;
unw_word_t w0, w1, i0, i1, i2, ip;
unw_addr_space_t as;
unw_accessors_t *a;
void *arg;
int ret;
as = c->dwarf.as;
as->validate = 1; /* Don't trust the ip */
arg = c->dwarf.as_arg;
/* Check if return address points at sigreturn sequence.
on ppc64 Linux that is (see libc.so):
0x38210080 addi r1, r1, 128 // pop the stack
0x380000ac li r0, 172 // invoke system service 172
0x44000002 sc
*/
ip = c->dwarf.ip;
if (ip == 0)
return 0;
/* Read two 8-byte words at the IP. We are only looking at 3
consecutive 32-bit words, so the second 8-byte word needs to be
shifted right by 32 bits (think big-endian) */
a = unw_get_accessors_int (as);
if ((ret = (*a->access_mem) (as, ip, &w0, 0, arg)) < 0
|| (ret = (*a->access_mem) (as, ip + 8, &w1, 0, arg)) < 0)
return 0;
if (tdep_big_endian (as))
{
i0 = w0 >> 32;
i1 = w0 & 0xffffffffUL;
i2 = w1 >> 32;
}
else
{
i0 = w0 & 0xffffffffUL;
i1 = w0 >> 32;
i2 = w1 & 0xffffffffUL;
}
return (i0 == 0x38210080 && i1 == 0x380000ac && i2 == 0x44000002);
}
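/* Illustrative self-contained sketch, not from libunwind: demonstrates the
   endianness-dependent split used above, where two 64-bit reads cover the
   three 32-bit instructions of the ppc64 sigreturn stub. The opcodes are the
   same ones checked in unw_is_signal_frame. */
#include <stdint.h>
#include <stdio.h>

static int looks_like_sigreturn(uint64_t w0, uint64_t w1, int big_endian)
{
    uint64_t i0, i1, i2;
    if (big_endian)
    {
        i0 = w0 >> 32;
        i1 = w0 & 0xffffffffUL;
        i2 = w1 >> 32;
    }
    else
    {
        i0 = w0 & 0xffffffffUL;
        i1 = w0 >> 32;
        i2 = w1 & 0xffffffffUL;
    }
    return i0 == 0x38210080 && i1 == 0x380000ac && i2 == 0x44000002;
}

int main(void)
{
    /* Little-endian layout: the first instruction occupies the low half of w0. */
    uint64_t w0 = ((uint64_t)0x380000ac << 32) | 0x38210080;
    uint64_t w1 = 0x44000002;
    printf("%d\n", looks_like_sigreturn(w0, w1, /* big_endian */ 0)); /* prints 1 */
    return 0;
}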
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/baseservices/exceptions/generics/try-finally01.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="try-finally01.cs" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<AllowUnsafeBlocks>true</AllowUnsafeBlocks>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<ItemGroup>
<Compile Include="try-finally01.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltScenarios/Schematron/v2Test4.txt
|
In pattern @Title:
The element Person must have a Title attribute
In pattern (@Title = 'Mr' and ex:Gender = 'Male') or @Title != 'Mr':
If the Title is "Mr" then the gender of the person must be "Male".
|
In pattern @Title:
The element Person must have a Title attribute
In pattern (@Title = 'Mr' and ex:Gender = 'Male') or @Title != 'Mr':
If the Title is "Mr" then the gender of the person must be "Male".
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/vm/perfinfo.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ===========================================================================
// File: perfinfo.cpp
//
#include "common.h"
#if defined(FEATURE_PERFMAP) && !defined(DACCESS_COMPILE)
#include "perfinfo.h"
#include "pal.h"
PerfInfo::PerfInfo(int pid)
: m_Stream(nullptr)
{
LIMITED_METHOD_CONTRACT;
SString tempPath;
if (!WszGetTempPath(tempPath))
{
return;
}
SString path;
path.Printf("%Sperfinfo-%d.map", tempPath.GetUnicode(), pid);
OpenFile(path);
}
// Logs image loads into the process' perfinfo-%d.map file
void PerfInfo::LogImage(PEAssembly* pPEAssembly, WCHAR* guid)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
PRECONDITION(pPEAssembly != nullptr);
PRECONDITION(guid != nullptr);
} CONTRACTL_END;
SString value;
const SString& path = pPEAssembly->GetPath();
if (path.IsEmpty())
{
return;
}
SIZE_T baseAddr = 0;
if (pPEAssembly->IsReadyToRun())
{
PEImageLayout *pLoadedLayout = pPEAssembly->GetLoadedLayout();
if (pLoadedLayout)
{
baseAddr = (SIZE_T)pLoadedLayout->GetBase();
}
}
value.Printf("%S%c%S%c%p", path.GetUnicode(), sDelimiter, guid, sDelimiter, baseAddr);
SString command;
command.Printf("%s", "ImageLoad");
WriteLine(command, value);
}
// Writes a delimited line in which "type" identifies the kind of entry and "value" carries that entry's data. Used to log specific information, e.g. by LogImage.
void PerfInfo::WriteLine(SString& type, SString& value)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
} CONTRACTL_END;
if (m_Stream == nullptr)
{
return;
}
SString line;
line.Printf("%S%c%S%c\n",
type.GetUnicode(), sDelimiter, value.GetUnicode(), sDelimiter);
EX_TRY
{
StackScratchBuffer scratch;
const char* strLine = line.GetANSI(scratch);
ULONG inCount = line.GetCount();
ULONG outCount;
m_Stream->Write(strLine, inCount, &outCount);
if (inCount != outCount)
{
// error encountered
}
}
EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
}
// Opens the file at the given path for writing.
void PerfInfo::OpenFile(SString& path)
{
STANDARD_VM_CONTRACT;
m_Stream = new (nothrow) CFileStream();
if (m_Stream != nullptr)
{
HRESULT hr = m_Stream->OpenForWrite(path.GetUnicode());
if (FAILED(hr))
{
delete m_Stream;
m_Stream = nullptr;
}
}
}
PerfInfo::~PerfInfo()
{
LIMITED_METHOD_CONTRACT;
delete m_Stream;
m_Stream = nullptr;
}
#endif
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// ===========================================================================
// File: perfinfo.cpp
//
#include "common.h"
#if defined(FEATURE_PERFMAP) && !defined(DACCESS_COMPILE)
#include "perfinfo.h"
#include "pal.h"
PerfInfo::PerfInfo(int pid)
: m_Stream(nullptr)
{
LIMITED_METHOD_CONTRACT;
SString tempPath;
if (!WszGetTempPath(tempPath))
{
return;
}
SString path;
path.Printf("%Sperfinfo-%d.map", tempPath.GetUnicode(), pid);
OpenFile(path);
}
// Logs image loads into the process' perfinfo-%d.map file
void PerfInfo::LogImage(PEAssembly* pPEAssembly, WCHAR* guid)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
PRECONDITION(pPEAssembly != nullptr);
PRECONDITION(guid != nullptr);
} CONTRACTL_END;
SString value;
const SString& path = pPEAssembly->GetPath();
if (path.IsEmpty())
{
return;
}
SIZE_T baseAddr = 0;
if (pPEAssembly->IsReadyToRun())
{
PEImageLayout *pLoadedLayout = pPEAssembly->GetLoadedLayout();
if (pLoadedLayout)
{
baseAddr = (SIZE_T)pLoadedLayout->GetBase();
}
}
value.Printf("%S%c%S%c%p", path.GetUnicode(), sDelimiter, guid, sDelimiter, baseAddr);
SString command;
command.Printf("%s", "ImageLoad");
WriteLine(command, value);
}
// Writes a delimited line in which "type" identifies the kind of entry and "value" carries that entry's data. Used to log specific information, e.g. by LogImage.
void PerfInfo::WriteLine(SString& type, SString& value)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
} CONTRACTL_END;
if (m_Stream == nullptr)
{
return;
}
SString line;
line.Printf("%S%c%S%c\n",
type.GetUnicode(), sDelimiter, value.GetUnicode(), sDelimiter);
EX_TRY
{
StackScratchBuffer scratch;
const char* strLine = line.GetANSI(scratch);
ULONG inCount = line.GetCount();
ULONG outCount;
m_Stream->Write(strLine, inCount, &outCount);
if (inCount != outCount)
{
// error encountered
}
}
EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
}
// Opens the file at the given path for writing.
void PerfInfo::OpenFile(SString& path)
{
STANDARD_VM_CONTRACT;
m_Stream = new (nothrow) CFileStream();
if (m_Stream != nullptr)
{
HRESULT hr = m_Stream->OpenForWrite(path.GetUnicode());
if (FAILED(hr))
{
delete m_Stream;
m_Stream = nullptr;
}
}
}
PerfInfo::~PerfInfo()
{
LIMITED_METHOD_CONTRACT;
delete m_Stream;
m_Stream = nullptr;
}
#endif
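// Illustrative sketch, not part of the original file: shows the shape of one
// perfinfo-<pid>.map record produced by LogImage/WriteLine above. The concrete
// delimiter character is an assumption here; the real value is sDelimiter,
// declared in perfinfo.h, and the path/guid/address below are placeholders.
#include <cstdio>

static void WriteExampleRecord(FILE* stream, char delimiter)
{
    const char* path = "/path/to/System.Private.CoreLib.dll"; // placeholder
    const char* guid = "0123456789ABCDEF0123456789ABCDEF";    // placeholder
    void* baseAddr = nullptr;                                  // placeholder
    // Mirrors value.Printf("%S%c%S%c%p", path, sDelimiter, guid, sDelimiter, baseAddr)
    // wrapped by line.Printf("%S%c%S%c\n", type, sDelimiter, value, sDelimiter).
    std::fprintf(stream, "ImageLoad%c%s%c%s%c%p%c\n",
                 delimiter, path, delimiter, guid, delimiter, baseAddr, delimiter);
}

// WriteExampleRecord(stdout, ';') prints a line roughly like:
//   ImageLoad;/path/to/System.Private.CoreLib.dll;0123456789ABCDEF0123456789ABCDEF;0x0;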
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/md/compiler/classfactory.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// ClassFactory.h
//
//
// Class factories are used by the plumbing in COM to activate new objects.
// This module contains the class factory code to instantiate the debugger
// objects described in <cordb.h>.
//
//*****************************************************************************
#ifndef __ClassFactory__h__
#define __ClassFactory__h__
#include "disp.h"
// This typedef is for a function which will create a new instance of an object.
typedef HRESULT (* PFN_CREATE_OBJ)(REFIID riid, void **ppvObject);
//*****************************************************************************
// This structure is used to declare a global list of coclasses. The class
// factory object is created with a pointer to the correct one of these, so
// that when create instance is called, it can be created.
//*****************************************************************************
struct COCLASS_REGISTER
{
const GUID *pClsid; // Class ID of the coclass.
LPCWSTR szProgID; // Prog ID of the class.
PFN_CREATE_OBJ pfnCreateObject; // Creation function for an instance.
};
//*****************************************************************************
// One class factory object satisfies all of our clsids, to reduce overall
// code bloat.
//*****************************************************************************
class MDClassFactory :
public IClassFactory
{
MDClassFactory() { } // Can't use without data.
public:
MDClassFactory(const COCLASS_REGISTER *pCoClass)
: m_cRef(1), m_pCoClass(pCoClass)
{ }
virtual ~MDClassFactory() {}
//
// IUnknown methods.
//
virtual HRESULT STDMETHODCALLTYPE QueryInterface(
REFIID riid,
void **ppvObject);
virtual ULONG STDMETHODCALLTYPE AddRef()
{
return InterlockedIncrement(&m_cRef);
}
virtual ULONG STDMETHODCALLTYPE Release()
{
LONG cRef = InterlockedDecrement(&m_cRef);
if (cRef <= 0)
delete this;
return (cRef);
}
//
// IClassFactory methods.
//
virtual HRESULT STDMETHODCALLTYPE CreateInstance(
IUnknown *pUnkOuter,
REFIID riid,
void **ppvObject);
virtual HRESULT STDMETHODCALLTYPE LockServer(
BOOL fLock);
private:
LONG m_cRef; // Reference count.
const COCLASS_REGISTER *m_pCoClass; // The class we belong to.
};
#endif // __ClassFactory__h__
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//*****************************************************************************
// ClassFactory.h
//
//
// Class factories are used by the plumbing in COM to activate new objects.
// This module contains the class factory code to instantiate the debugger
// objects described in <cordb.h>.
//
//*****************************************************************************
#ifndef __ClassFactory__h__
#define __ClassFactory__h__
#include "disp.h"
// This typedef is for a function which will create a new instance of an object.
typedef HRESULT (* PFN_CREATE_OBJ)(REFIID riid, void **ppvObject);
//*****************************************************************************
// This structure is used to declare a global list of coclasses. The class
// factory object is created with a pointer to the correct one of these, so
// that when create instance is called, it can be created.
//*****************************************************************************
struct COCLASS_REGISTER
{
const GUID *pClsid; // Class ID of the coclass.
LPCWSTR szProgID; // Prog ID of the class.
PFN_CREATE_OBJ pfnCreateObject; // Creation function for an instance.
};
//*****************************************************************************
// One class factory object satisfies all of our clsids, to reduce overall
// code bloat.
//*****************************************************************************
class MDClassFactory :
public IClassFactory
{
MDClassFactory() { } // Can't use without data.
public:
MDClassFactory(const COCLASS_REGISTER *pCoClass)
: m_cRef(1), m_pCoClass(pCoClass)
{ }
virtual ~MDClassFactory() {}
//
// IUnknown methods.
//
virtual HRESULT STDMETHODCALLTYPE QueryInterface(
REFIID riid,
void **ppvObject);
virtual ULONG STDMETHODCALLTYPE AddRef()
{
return InterlockedIncrement(&m_cRef);
}
virtual ULONG STDMETHODCALLTYPE Release()
{
LONG cRef = InterlockedDecrement(&m_cRef);
if (cRef <= 0)
delete this;
return (cRef);
}
//
// IClassFactory methods.
//
virtual HRESULT STDMETHODCALLTYPE CreateInstance(
IUnknown *pUnkOuter,
REFIID riid,
void **ppvObject);
virtual HRESULT STDMETHODCALLTYPE LockServer(
BOOL fLock);
private:
LONG m_cRef; // Reference count.
const COCLASS_REGISTER *m_pCoClass; // The class we belong to.
};
#endif // __ClassFactory__h__
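// Illustrative sketch, not from this header: one way the pieces above are
// typically wired together. CreateExampleObject, CLSID_Example and g_CoClasses
// are hypothetical names invented for this example; the real coclass table and
// creation functions live elsewhere in the metadata sources, and the real
// prog-ID strings use the repo's W() wide-string macro.
static HRESULT CreateExampleObject(REFIID, void **ppvObject) // hypothetical PFN_CREATE_OBJ stub
{
    *ppvObject = NULL;
    return E_NOTIMPL;
}

static const GUID CLSID_Example =
    { 0x00000000, 0x0000, 0x0000, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } }; // placeholder GUID

static const COCLASS_REGISTER g_CoClasses[] =
{
    { &CLSID_Example, W("Example"), CreateExampleObject },
};

// A DllGetClassObject-style entry point would scan the table and hand out a
// factory bound to the matching entry, for example:
//
//   if (IsEqualCLSID(rclsid, *g_CoClasses[0].pClsid))
//   {
//       MDClassFactory *pFactory = new (nothrow) MDClassFactory(&g_CoClasses[0]);
//       if (pFactory == NULL)
//           return E_OUTOFMEMORY;
//       HRESULT hr = pFactory->QueryInterface(riid, ppv);
//       pFactory->Release();
//       return hr;
//   }
//   return CLASS_E_CLASSNOTAVAILABLE;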
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features . On Arm64, we always support SIMD types, but in the code base we were sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and therefore does not work for Arm64. On Arm64, we always need SIMD features to support ABI handling. Modified all the usages to instead use `supportSIMDTypes()`.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/mono/wasm/debugger/DebuggerTestSuite/SteppingTests.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Linq;
using System.Threading.Tasks;
using Newtonsoft.Json.Linq;
using Xunit;
namespace DebuggerTests
{
public class SteppingTests : DebuggerTestBase
{
[Fact]
public async Task TrivalStepping()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd",
wait_for_event_fn: (pause_location) =>
{
//make sure we're on the right bp
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
var top_frame = pause_location["callFrames"][0];
CheckLocation("dotnet://debugger-test.dll/debugger-test.cs", 8, 4, scripts, top_frame["functionLocation"]);
return Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 11, 8, "IntAdd",
wait_for_event_fn: (pause_location) =>
{
var top_frame = pause_location["callFrames"][0];
CheckLocation("dotnet://debugger-test.dll/debugger-test.cs", 8, 4, scripts, top_frame["functionLocation"]);
return Task.CompletedTask;
}
);
}
[Fact]
public async Task InspectLocalsDuringStepping()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(debugger_test_loc, 10, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); }, 1);",
debugger_test_loc, 10, 8, "IntAdd",
locals_fn: async (locals) =>
{
CheckNumber(locals, "a", 10);
CheckNumber(locals, "b", 20);
CheckNumber(locals, "c", 30);
CheckNumber(locals, "d", 0);
CheckNumber(locals, "e", 0);
await Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, debugger_test_loc, 11, 8, "IntAdd",
locals_fn: async (locals) =>
{
CheckNumber(locals, "a", 10);
CheckNumber(locals, "b", 20);
CheckNumber(locals, "c", 30);
CheckNumber(locals, "d", 50);
CheckNumber(locals, "e", 0);
await Task.CompletedTask;
}
);
//step and get locals
await StepAndCheck(StepKind.Over, debugger_test_loc, 12, 8, "IntAdd",
locals_fn: async (locals) =>
{
CheckNumber(locals, "a", 10);
CheckNumber(locals, "b", 20);
CheckNumber(locals, "c", 30);
CheckNumber(locals, "d", 50);
CheckNumber(locals, "e", 60);
await Task.CompletedTask;
}
);
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectLocalsInPreviousFramesDuringSteppingIn2(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var dep_cs_loc = "dotnet://debugger-test.dll/dependency.cs";
await SetBreakpoint(dep_cs_loc, 35, 8);
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
// Will stop in Complex.DoEvenMoreStuff
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_use_complex (); }, 1);",
dep_cs_loc, 35, 8, "DoEvenMoreStuff",
locals_fn: async (locals) =>
{
Assert.Single(locals);
await CheckObject(locals, "this", "Simple.Complex");
}
);
var props = await GetObjectOnFrame(pause_location["callFrames"][0], "this");
Assert.Equal(4, props.Count());
CheckNumber(props, "A", 10);
await CheckString(props, "B", "xx");
await CheckString(props, "c", "20_xx");
// Check UseComplex frame
var locals_m1 = await GetLocalsForFrame(pause_location["callFrames"][3], debugger_test_loc, 23, 8, "UseComplex");
Assert.Equal(7, locals_m1.Count());
CheckNumber(locals_m1, "a", 10);
CheckNumber(locals_m1, "b", 20);
await CheckObject(locals_m1, "complex", "Simple.Complex");
CheckNumber(locals_m1, "c", 30);
CheckNumber(locals_m1, "d", 50);
CheckNumber(locals_m1, "e", 60);
CheckNumber(locals_m1, "f", 0);
props = await GetObjectOnFrame(pause_location["callFrames"][3], "complex");
Assert.Equal(4, props.Count());
CheckNumber(props, "A", 10);
await CheckString(props, "B", "xx");
await CheckString(props, "c", "20_xx");
pause_location = await StepAndCheck(StepKind.Over, dep_cs_loc, 25, 8, "DoStuff", times: 2);
// Check UseComplex frame again
locals_m1 = await GetLocalsForFrame(pause_location["callFrames"][1], debugger_test_loc, 23, 8, "UseComplex");
Assert.Equal(7, locals_m1.Count());
CheckNumber(locals_m1, "a", 10);
CheckNumber(locals_m1, "b", 20);
await CheckObject(locals_m1, "complex", "Simple.Complex");
CheckNumber(locals_m1, "c", 30);
CheckNumber(locals_m1, "d", 50);
CheckNumber(locals_m1, "e", 60);
CheckNumber(locals_m1, "f", 0);
props = await GetObjectOnFrame(pause_location["callFrames"][1], "complex");
Assert.Equal(4, props.Count());
CheckNumber(props, "A", 10);
await CheckString(props, "B", "xx");
await CheckString(props, "c", "20_xx");
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectLocalsInPreviousFramesDuringSteppingIn(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(debugger_test_loc, 111, 12);
// Will stop in InnerMethod
var wait_res = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_outer_method(); }, 1);",
debugger_test_loc, 111, 12, "InnerMethod",
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
CheckNumber(locals, "i", 5);
CheckNumber(locals, "j", 24);
await CheckString(locals, "foo_str", "foo");
await CheckObject(locals, "this", "Math.NestedInMath");
await Task.CompletedTask;
}
);
var this_props = await GetObjectOnFrame(wait_res["callFrames"][0], "this");
Assert.Equal(2, this_props.Count());
await CheckObject(this_props, "m", "Math");
await CheckValueType(this_props, "SimpleStructProperty", "Math.SimpleStruct");
var ss_props = await GetObjectOnLocals(this_props, "SimpleStructProperty");
var dt = new DateTime(2020, 1, 2, 3, 4, 5);
await CheckProps(ss_props, new
{
dt = TDateTime(dt),
gs = TValueType("Math.GenericStruct<System.DateTime>")
}, "ss_props");
// Check OuterMethod frame
var locals_m1 = await GetLocalsForFrame(wait_res["callFrames"][1], debugger_test_loc, 87, 8, "OuterMethod");
Assert.Equal(5, locals_m1.Count());
// FIXME: Failing test CheckNumber (locals_m1, "i", 5);
// FIXME: Failing test CheckString (locals_m1, "text", "Hello");
CheckNumber(locals_m1, "new_i", 0);
CheckNumber(locals_m1, "k", 0);
await CheckObject(locals_m1, "nim", "Math.NestedInMath");
// step back into OuterMethod
await StepAndCheck(StepKind.Over, debugger_test_loc, 91, 8, "OuterMethod", times: 6,
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
// FIXME: Failing test CheckNumber (locals_m1, "i", 5);
await CheckString(locals, "text", "Hello");
// FIXME: Failing test CheckNumber (locals, "new_i", 24);
CheckNumber(locals, "k", 19);
await CheckObject(locals, "nim", "Math.NestedInMath");
}
);
//await StepAndCheck (StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 81, 2, "OuterMethod", times: 2);
// step into InnerMethod2
await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 96, 4, "InnerMethod2",
locals_fn: async (locals) =>
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "s", "test string");
//out var: CheckNumber (locals, "k", 0);
CheckNumber(locals, "i", 24);
await Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 100, 4, "InnerMethod2", times: 4,
locals_fn: async (locals) =>
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "s", "test string");
// FIXME: Failing test CheckNumber (locals, "k", 34);
CheckNumber(locals, "i", 24);
await Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 92, 8, "OuterMethod", times: 1,
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckString(locals, "text", "Hello");
// FIXME: failing test CheckNumber (locals, "i", 5);
CheckNumber(locals, "new_i", 22);
CheckNumber(locals, "k", 34);
await CheckObject(locals, "nim", "Math.NestedInMath");
}
);
}
[Fact]
public async Task InspectLocalsDuringSteppingIn()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 86, 8);
await EvaluateAndCheck("window.setTimeout(function() { invoke_outer_method(); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 86, 8, "OuterMethod",
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckObject(locals, "nim", "Math.NestedInMath");
CheckNumber(locals, "i", 5);
CheckNumber(locals, "k", 0);
CheckNumber(locals, "new_i", 0);
await CheckString(locals, "text", null);
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 87, 8, "OuterMethod",
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckObject(locals, "nim", "Math.NestedInMath");
// FIXME: Failing test CheckNumber (locals, "i", 5);
CheckNumber(locals, "k", 0);
CheckNumber(locals, "new_i", 0);
await CheckString(locals, "text", "Hello");
await Task.CompletedTask;
}
);
// Step into InnerMethod
await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 105, 8, "InnerMethod");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 110, 12, "InnerMethod", times: 5,
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
CheckNumber(locals, "i", 5);
CheckNumber(locals, "j", 15);
await CheckString(locals, "foo_str", "foo");
await CheckObject(locals, "this", "Math.NestedInMath");
await Task.CompletedTask;
}
);
// Step back to OuterMethod
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 90, 8, "OuterMethod", times: 6,
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckObject(locals, "nim", "Math.NestedInMath");
// FIXME: Failing test CheckNumber (locals, "i", 5);
CheckNumber(locals, "k", 0);
CheckNumber(locals, "new_i", 24);
await CheckString(locals, "text", "Hello");
}
);
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectLocalsInAsyncMethods(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(debugger_test_loc, 120, 12);
await SetBreakpoint(debugger_test_loc, 135, 12);
// Will stop in Asyncmethod0
var wait_res = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_async_method_with_await(); }, 1);",
debugger_test_loc, 120, 12, "MoveNext", //FIXME:
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
await CheckString(locals, "s", "string from js");
CheckNumber(locals, "i", 42);
await CheckString(locals, "local0", "value0");
await CheckObject(locals, "this", "Math.NestedInMath");
}
);
Console.WriteLine(wait_res);
#if false // Disabled for now, as we don't have proper async traces
var locals = await GetProperties(wait_res["callFrames"][2]["callFrameId"].Value<string>());
Assert.Equal(4, locals.Count());
await CheckString(locals, "ls", "string from jstest").ConfigureAwait(false);
CheckNumber(locals, "li", 52);
#endif
// TODO: previous frames have async machinery details, so no point checking that right now
var pause_loc = await SendCommandAndCheck(null, "Debugger.resume", debugger_test_loc, 135, 12, /*FIXME: "AsyncMethodNoReturn"*/ "MoveNext",
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
await CheckString(locals, "str", "AsyncMethodNoReturn's local");
await CheckObject(locals, "this", "Math.NestedInMath");
//FIXME: check fields
await CheckValueType(locals, "ss", "Math.SimpleStruct");
await CheckArray(locals, "ss_arr", "Math.SimpleStruct[]", "Math.SimpleStruct[0]");
// TODO: struct fields
await Task.CompletedTask;
}
);
var this_props = await GetObjectOnFrame(pause_loc["callFrames"][0], "this");
Assert.Equal(2, this_props.Count());
await CheckObject(this_props, "m", "Math");
await CheckValueType(this_props, "SimpleStructProperty", "Math.SimpleStruct");
// TODO: Check `this` properties
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectValueTypeMethodArgsWhileStepping(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
await SetBreakpoint(debugger_test_loc, 36, 12);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:TestStructsAsMethodArgs'); }, 1);",
debugger_test_loc, 36, 12, "MethodWithStructArgs");
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "label", "TestStructsAsMethodArgs#label");
await CheckValueType(locals, "ss_arg", "DebuggerTests.ValueTypesTest.SimpleStruct");
CheckNumber(locals, "x", 3);
}
var dt = new DateTime(2025, 6, 7, 8, 10, 11);
var ss_local_as_ss_arg = new
{
V = TGetter("V"),
str_member = TString("ss_local#SimpleStruct#string#0#SimpleStruct#str_member"),
dt = TDateTime(dt),
gs = TValueType("DebuggerTests.ValueTypesTest.GenericStruct<System.DateTime>"),
Kind = TEnum("System.DateTimeKind", "Local")
};
var ss_local_gs = new
{
StringField = TString("ss_local#SimpleStruct#string#0#SimpleStruct#gs#StringField"),
List = TObject("System.Collections.Generic.List<System.DateTime>", description: "Count = 1"),
Options = TEnum("DebuggerTests.Options", "Option1")
};
// Check ss_arg's properties
var ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][0], "ss_arg");
await CheckProps(ss_arg_props, ss_local_as_ss_arg, "ss_arg");
var res = await InvokeGetter(GetAndAssertObjectWithName(locals, "ss_arg"), "V");
await CheckValue(res.Value["result"], TNumber(0xDEADBEEF + (uint)dt.Month), "ss_arg#V");
{
// Check ss_local.gs
await CompareObjectPropertiesFor(ss_arg_props, "gs", ss_local_gs);
}
pause_location = await StepAndCheck(StepKind.Over, debugger_test_loc, 40, 8, "MethodWithStructArgs", times: 4,
locals_fn: async (l) => { /* non-null to make sure that locals get fetched */ await Task.CompletedTask; });
locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "label", "TestStructsAsMethodArgs#label");
await CheckValueType(locals, "ss_arg", "DebuggerTests.ValueTypesTest.SimpleStruct");
CheckNumber(locals, "x", 3);
}
var ss_arg_updated = new
{
V = TGetter("V"),
str_member = TString("ValueTypesTest#MethodWithStructArgs#updated#ss_arg#str_member"),
dt = TDateTime(dt),
gs = TValueType("DebuggerTests.ValueTypesTest.GenericStruct<System.DateTime>"),
Kind = TEnum("System.DateTimeKind", "Utc")
};
ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][0], "ss_arg");
await CheckProps(ss_arg_props, ss_arg_updated, "ss_arg");
res = await InvokeGetter(GetAndAssertObjectWithName(locals, "ss_arg"), "V");
await CheckValue(res.Value["result"], TNumber(0xDEADBEEF + (uint)dt.Month), "ss_arg#V");
{
// Check ss_local.gs
await CompareObjectPropertiesFor(ss_arg_props, "gs", new
{
StringField = TString("ValueTypesTest#MethodWithStructArgs#updated#gs#StringField#3"),
List = TObject("System.Collections.Generic.List<System.DateTime>", description: "Count = 1"),
Options = TEnum("DebuggerTests.Options", "Option1")
});
}
// Check locals on previous frame, same as earlier in this test
ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][1], "ss_local");
await CheckProps(ss_arg_props, ss_local_as_ss_arg, "ss_local");
{
// Check ss_local.dt
await CheckDateTime(ss_arg_props, "dt", dt);
// Check ss_local.gs
var gs_props = await GetObjectOnLocals(ss_arg_props, "gs");
await CheckString(gs_props, "StringField", "ss_local#SimpleStruct#string#0#SimpleStruct#gs#StringField");
await CheckObject(gs_props, "List", "System.Collections.Generic.List<System.DateTime>", description: "Count = 1");
}
// ----------- Step back to the caller ---------
pause_location = await StepAndCheck(StepKind.Over, debugger_test_loc, 30, 12, "TestStructsAsMethodArgs",
times: 1, locals_fn: async (l) => { /* non-null to make sure that locals get fetched */ await Task.CompletedTask; });
locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckProps(locals, new
{
ss_local = TValueType("DebuggerTests.ValueTypesTest.SimpleStruct"),
ss_ret = TValueType("DebuggerTests.ValueTypesTest.SimpleStruct")
},
"locals#0");
ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][0], "ss_local");
await CheckProps(ss_arg_props, ss_local_as_ss_arg, "ss_local");
{
// Check ss_local.gs
await CompareObjectPropertiesFor(ss_arg_props, "gs", ss_local_gs, label: "ss_local_gs");
}
// FIXME: check ss_local.gs.List's members
}
[Fact]
public async Task CheckUpdatedValueTypeFieldsOnResume()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
var lines = new[] { 205, 208 };
await SetBreakpoint(debugger_test_loc, lines[0], 12);
await SetBreakpoint(debugger_test_loc, lines[1], 12);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:MethodUpdatingValueTypeMembers'); }, 1);",
debugger_test_loc, lines[0], 12, "MethodUpdatingValueTypeMembers");
await CheckLocals(pause_location, new DateTime(1, 2, 3, 4, 5, 6), new DateTime(4, 5, 6, 7, 8, 9));
// Resume
pause_location = await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", debugger_test_loc, lines[1], 12, "MethodUpdatingValueTypeMembers");
await CheckLocals(pause_location, new DateTime(9, 8, 7, 6, 5, 4), new DateTime(5, 1, 3, 7, 9, 10));
async Task CheckLocals(JToken pause_location, DateTime obj_dt, DateTime vt_dt)
{
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckProps(locals, new
{
obj = TObject("DebuggerTests.ClassForToStringTests"),
vt = TObject("DebuggerTests.StructForToStringTests")
}, "locals");
var obj_props = await GetObjectOnLocals(locals, "obj");
{
await CheckProps(obj_props, new
{
DT = TDateTime(obj_dt)
}, "locals#obj.DT", num_fields: 5);
}
var vt_props = await GetObjectOnLocals(locals, "vt");
{
await CheckProps(vt_props, new
{
DT = TDateTime(vt_dt)
}, "locals#obj.DT", num_fields: 5);
}
}
}
[Fact]
public async Task CheckUpdatedValueTypeLocalsOnResumeAsync()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
var lines = new[] { 214, 216 };
await SetBreakpoint(debugger_test_loc, lines[0], 12);
await SetBreakpoint(debugger_test_loc, lines[1], 12);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:MethodUpdatingValueTypeLocalsAsync'); }, 1);",
debugger_test_loc, lines[0], 12, "MoveNext");
var dt = new DateTime(1, 2, 3, 4, 5, 6);
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckDateTime(locals, "dt", dt);
// Resume
dt = new DateTime(9, 8, 7, 6, 5, 4);
pause_location = await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", debugger_test_loc, lines[1], 12, "MoveNext");
locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckDateTime(locals, "dt", dt);
}
[Fact]
public async Task CheckUpdatedVTArrayMembersOnResume()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
var lines = new[] { 225, 227 };
await SetBreakpoint(debugger_test_loc, lines[0], 12);
await SetBreakpoint(debugger_test_loc, lines[1], 12);
var dt = new DateTime(1, 2, 3, 4, 5, 6);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:MethodUpdatingVTArrayMembers'); }, 1);",
debugger_test_loc, lines[0], 12, "MethodUpdatingVTArrayMembers");
await CheckArrayElements(pause_location, dt);
// Resume
dt = new DateTime(9, 8, 7, 6, 5, 4);
pause_location = await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", debugger_test_loc, lines[1], 12, "MethodUpdatingVTArrayMembers");
await CheckArrayElements(pause_location, dt);
async Task CheckArrayElements(JToken pause_location, DateTime dt)
{
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckProps(locals, new
{
ssta = TArray("DebuggerTests.StructForToStringTests[]", "DebuggerTests.StructForToStringTests[1]")
}, "locals");
var ssta = await GetObjectOnLocals(locals, "ssta");
var sst0 = await GetObjectOnLocals(ssta, "0");
await CheckProps(sst0, new
{
DT = TDateTime(dt)
}, "dta [0]", num_fields: 5);
}
}
[Fact]
public async Task SteppingIntoMscorlib()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 83, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] Math:OuterMethod'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 83, 8,
"OuterMethod");
//make sure we're on the right bp
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
pause_location = await SendCommandAndCheck(null, $"Debugger.stepInto", null, -1, -1, null);
var top_frame = pause_location["callFrames"][0];
AssertEqual("WriteLine", top_frame["functionName"]?.Value<string>(), "Expected to be in WriteLine method");
var script_id = top_frame["functionLocation"]["scriptId"].Value<string>();
Assert.Matches("^dotnet://(mscorlib|System\\.Console)\\.dll/Console.cs", scripts[script_id]);
}
[Fact]
public async Task CreateGoodBreakpointAndHitAndRemoveAndDontHit()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
var bp2 = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 12, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); invoke_add()}, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd");
Assert.Equal("other", pause_location["reason"]?.Value<string>());
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
}
[Fact]
public async Task CreateGoodBreakpointAndHitAndRemoveTwice()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
var bp2 = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 12, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); invoke_add()}, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd");
Assert.Equal("other", pause_location["reason"]?.Value<string>());
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
}
[Fact]
public async Task CreateGoodBreakpointAndHitAndRemoveAndDontHitAndCreateAgainAndHit()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
var bp2 = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 12, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); invoke_add(); invoke_add(); invoke_add()}, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd");
Assert.Equal("other", pause_location["reason"]?.Value<string>());
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 10, 8, "IntAdd");
}
// [Fact]
//https://github.com/dotnet/runtime/issues/42421
public async Task BreakAfterAwaitThenStepOverTillBackToCaller()
{
var bp = await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut2", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:TestAsyncStepOut'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 21, 12,
"MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-async-step.cs", 23, 12, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-async-step.cs", 24, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-async-step.cs", 15, 12, "MoveNext");
}
// [Fact]
//[ActiveIssue("https://github.com/dotnet/runtime/issues/42421")]
public async Task StepOutOfAsyncMethod()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut2", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:TestAsyncStepOut'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 21, 12,
"MoveNext");
await StepAndCheck(StepKind.Out, source_file, 15, 4, "TestAsyncStepOut");
}
[Fact]
public async Task ResumeOutOfAsyncMethodToAsyncCallerWithBreakpoint()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut2", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:TestAsyncStepOut'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 21, 12,
"MoveNext");
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut", 2);
await SendCommandAndCheck(null, "Debugger.resume", source_file, 16, 8, "MoveNext");
}
[Fact]
public async Task StepOutOfNonAsyncMethod()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "OtherMethod0", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:SimpleMethod'); }, 1);",
source_file, -1, -1,
"OtherMethod0");
await StepAndCheck(StepKind.Out, source_file, 29, 12, "SimpleMethod");
}
[Fact]
public async Task BreakOnAwaitThenStepOverToNextAwaitCall()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "MethodWithTwoAwaitsAsync", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 53, 12,
"MoveNext");
await StepAndCheck(StepKind.Over, source_file, 54, 12, "MoveNext");
}
[Fact]
public async Task BreakOnAwaitThenStepOverToNextLine()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "StepOverTestAsync", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 46, 12,
"MoveNext");
// BUG: chrome: not able to show any bp line indicator
await StepAndCheck(StepKind.Over, source_file, 47, 12, "MoveNext");
}
[Fact]
public async Task BreakOnAwaitThenResumeToNextBreakpoint()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "StepOverTestAsync", 1);
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "StepOverTestAsync", 3);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 46, 12,
"MoveNext");
await StepAndCheck(StepKind.Resume, source_file, 48, 8, "MoveNext");
}
[Fact]
public async Task BreakOnAwaitThenResumeToNextBreakpointAfterSecondAwaitInSameMethod()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "MethodWithTwoAwaitsAsync", 1);
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "MethodWithTwoAwaitsAsync", 5);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 52, 12,
"MoveNext");
await StepAndCheck(StepKind.Resume, source_file, 56, 12, "MoveNext");
}
[Fact]
public async Task BreakOnMethodCalledFromHiddenLine()
{
await SetBreakpointInMethod("debugger-test.dll", "HiddenSequencePointTest", "StepOverHiddenSP2", 0);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 546, 4,
"StepOverHiddenSP2");
// Check previous frame
var top_frame = pause_location["callFrames"][1];
Assert.Equal("StepOverHiddenSP", top_frame["functionName"].Value<string>());
Assert.Contains("debugger-test.cs", top_frame["url"].Value<string>());
CheckLocation("dotnet://debugger-test.dll/debugger-test.cs", 537, 8, scripts, top_frame["location"]);
}
[Fact]
public async Task StepOverHiddenLinesShouldResumeAtNextAvailableLineInTheMethod()
{
string source_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(source_loc, 537, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 537, 8,
"StepOverHiddenSP");
await StepAndCheck(StepKind.Over, source_loc, 542, 8, "StepOverHiddenSP");
}
[Fact]
        public async Task StepOverHiddenLinesInMethodWithNoNextAvailableLineShouldResumeAtCallSite()
{
string source_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(source_loc, 552, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 552, 8,
"MethodWithHiddenLinesAtTheEnd");
await StepAndCheck(StepKind.Over, source_loc, 544, 4, "StepOverHiddenSP");
}
// [Fact]
// Issue: https://github.com/dotnet/runtime/issues/42704
async Task BreakpointOnHiddenLineShouldStopAtEarliestNextAvailableLine()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 539, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 546, 4,
"StepOverHiddenSP2");
}
[Fact]
public async Task BreakpointOnHiddenLineOfMethodWithNoNextVisibleLineShouldNotPause()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 554, 12);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);";
await cli.SendCommand($"Runtime.evaluate", JObject.FromObject(new { expression }), token);
Task pause_task = insp.WaitFor(Inspector.PAUSE);
Task t = await Task.WhenAny(pause_task, Task.Delay(2000));
Assert.True(t != pause_task, "Debugger unexpectedly paused");
}
[Fact]
public async Task SimpleStep_RegressionTest_49141()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 674, 0);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);";
await EvaluateAndCheck(
expression,
"dotnet://debugger-test.dll/debugger-test.cs", 674, 12,
"Bart");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 677, 8, "Bart");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 678, 4, "Bart");
}
[Fact]
public async Task StepAndEvaluateExpression()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 682, 0);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 682, 8,
"RunBart");
var pause_location = await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 671, 4, "Bart");
var id = pause_location["callFrames"][0]["callFrameId"].Value<string>();
await EvaluateOnCallFrameAndCheck(id, ("this.Bar", TString("Same of something")));
pause_location = await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 673, 8, "Bart");
id = pause_location["callFrames"][0]["callFrameId"].Value<string>();
await EvaluateOnCallFrameAndCheck(id, ("this.Bar", TString("Same of something")));
}
[Fact]
public async Task StepOverWithMoreThanOneCommandInSameLine()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 693, 0);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);";
await EvaluateAndCheck(
expression,
"dotnet://debugger-test.dll/debugger-test.cs", 693, 8,
"OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 694, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 696, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 699, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 701, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 702, 4, "OtherBar");
}
[Fact]
public async Task StepOverWithMoreThanOneCommandInSameLineAsync()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 710, 0);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);";
await EvaluateAndCheck(
expression,
"dotnet://debugger-test.dll/debugger-test.cs", 710, 8,
"MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 711, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 713, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 716, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 718, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 719, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 720, 4, "MoveNext");
}
[Fact]
public async Task CheckResetFrameNumberForEachStep()
{
var bp_conditional = await SetBreakpointInMethod("debugger-test.dll", "SteppingInto", "MethodToStep", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method('[debugger-test] SteppingInto:MethodToStep'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs",
bp_conditional.Value["locations"][0]["lineNumber"].Value<int>(),
bp_conditional.Value["locations"][0]["columnNumber"].Value<int>(),
"MethodToStep"
);
var pause_location = await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 799, 4, "Increment");
pause_location = await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 800, 8, "Increment");
            Assert.Equal("dotnet:scope:1", pause_location["callFrames"][0]["callFrameId"].Value<string>());
            pause_location = await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 801, 8, "Increment");
            Assert.Equal("dotnet:scope:1", pause_location["callFrames"][0]["callFrameId"].Value<string>());
            pause_location = await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 806, 8, "Increment");
            Assert.Equal("dotnet:scope:1", pause_location["callFrames"][0]["callFrameId"].Value<string>());
}
[Fact]
public async Task DebuggerHiddenIgnoreStepInto()
{
var pause_location = await SetBreakpointInMethod("debugger-test.dll", "DebuggerAttribute", "RunDebuggerHidden", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method('[debugger-test] DebuggerAttribute:RunDebuggerHidden'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs",
pause_location.Value["locations"][0]["lineNumber"].Value<int>(),
pause_location.Value["locations"][0]["columnNumber"].Value<int>(),
"RunDebuggerHidden"
);
var step_into = await SendCommandAndCheck(null, $"Debugger.stepInto", null, -1, -1, null);
Assert.Equal(
step_into["callFrames"][0]["location"]["lineNumber"].Value<int>(),
pause_location.Value["locations"][0]["lineNumber"].Value<int>() + 1
);
}
[Theory]
[InlineData("Debugger.stepInto")]
[InlineData("Debugger.stepOver")]
public async Task DebuggerHiddenIgnoreStepUserBreakpoint(string steppingFunction)
{
var pause_location = await SetBreakpointInMethod("debugger-test.dll", "DebuggerAttribute", "RunDebuggerHidden", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method('[debugger-test] DebuggerAttribute:RunDebuggerHidden'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs",
pause_location.Value["locations"][0]["lineNumber"].Value<int>(),
pause_location.Value["locations"][0]["columnNumber"].Value<int>(),
"RunDebuggerHidden"
);
// stepOver HiddenMethod:
var step_into1 = await SendCommandAndCheck(null, steppingFunction, null, -1, -1, null);
Assert.Equal(
pause_location.Value["locations"][0]["lineNumber"].Value<int>() + 1,
step_into1["callFrames"][0]["location"]["lineNumber"].Value<int>()
);
// freeze on HiddenMethodUserBreak:
var step_into2 = await SendCommandAndCheck(null, steppingFunction, null, -1, -1, null);
Assert.Equal(
pause_location.Value["locations"][0]["lineNumber"].Value<int>() + 1,
step_into2["callFrames"][0]["location"]["lineNumber"].Value<int>()
);
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using System.Linq;
using System.Threading.Tasks;
using Newtonsoft.Json.Linq;
using Xunit;
namespace DebuggerTests
{
public class SteppingTests : DebuggerTestBase
{
[Fact]
public async Task TrivalStepping()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd",
wait_for_event_fn: (pause_location) =>
{
//make sure we're on the right bp
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
var top_frame = pause_location["callFrames"][0];
CheckLocation("dotnet://debugger-test.dll/debugger-test.cs", 8, 4, scripts, top_frame["functionLocation"]);
return Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 11, 8, "IntAdd",
wait_for_event_fn: (pause_location) =>
{
var top_frame = pause_location["callFrames"][0];
CheckLocation("dotnet://debugger-test.dll/debugger-test.cs", 8, 4, scripts, top_frame["functionLocation"]);
return Task.CompletedTask;
}
);
}
[Fact]
public async Task InspectLocalsDuringStepping()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(debugger_test_loc, 10, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); }, 1);",
debugger_test_loc, 10, 8, "IntAdd",
locals_fn: async (locals) =>
{
CheckNumber(locals, "a", 10);
CheckNumber(locals, "b", 20);
CheckNumber(locals, "c", 30);
CheckNumber(locals, "d", 0);
CheckNumber(locals, "e", 0);
await Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, debugger_test_loc, 11, 8, "IntAdd",
locals_fn: async (locals) =>
{
CheckNumber(locals, "a", 10);
CheckNumber(locals, "b", 20);
CheckNumber(locals, "c", 30);
CheckNumber(locals, "d", 50);
CheckNumber(locals, "e", 0);
await Task.CompletedTask;
}
);
//step and get locals
await StepAndCheck(StepKind.Over, debugger_test_loc, 12, 8, "IntAdd",
locals_fn: async (locals) =>
{
CheckNumber(locals, "a", 10);
CheckNumber(locals, "b", 20);
CheckNumber(locals, "c", 30);
CheckNumber(locals, "d", 50);
CheckNumber(locals, "e", 60);
await Task.CompletedTask;
}
);
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectLocalsInPreviousFramesDuringSteppingIn2(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var dep_cs_loc = "dotnet://debugger-test.dll/dependency.cs";
await SetBreakpoint(dep_cs_loc, 35, 8);
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
// Will stop in Complex.DoEvenMoreStuff
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_use_complex (); }, 1);",
dep_cs_loc, 35, 8, "DoEvenMoreStuff",
locals_fn: async (locals) =>
{
Assert.Single(locals);
await CheckObject(locals, "this", "Simple.Complex");
}
);
var props = await GetObjectOnFrame(pause_location["callFrames"][0], "this");
Assert.Equal(4, props.Count());
CheckNumber(props, "A", 10);
await CheckString(props, "B", "xx");
await CheckString(props, "c", "20_xx");
// Check UseComplex frame
var locals_m1 = await GetLocalsForFrame(pause_location["callFrames"][3], debugger_test_loc, 23, 8, "UseComplex");
Assert.Equal(7, locals_m1.Count());
CheckNumber(locals_m1, "a", 10);
CheckNumber(locals_m1, "b", 20);
await CheckObject(locals_m1, "complex", "Simple.Complex");
CheckNumber(locals_m1, "c", 30);
CheckNumber(locals_m1, "d", 50);
CheckNumber(locals_m1, "e", 60);
CheckNumber(locals_m1, "f", 0);
props = await GetObjectOnFrame(pause_location["callFrames"][3], "complex");
Assert.Equal(4, props.Count());
CheckNumber(props, "A", 10);
await CheckString(props, "B", "xx");
await CheckString(props, "c", "20_xx");
pause_location = await StepAndCheck(StepKind.Over, dep_cs_loc, 25, 8, "DoStuff", times: 2);
// Check UseComplex frame again
locals_m1 = await GetLocalsForFrame(pause_location["callFrames"][1], debugger_test_loc, 23, 8, "UseComplex");
Assert.Equal(7, locals_m1.Count());
CheckNumber(locals_m1, "a", 10);
CheckNumber(locals_m1, "b", 20);
await CheckObject(locals_m1, "complex", "Simple.Complex");
CheckNumber(locals_m1, "c", 30);
CheckNumber(locals_m1, "d", 50);
CheckNumber(locals_m1, "e", 60);
CheckNumber(locals_m1, "f", 0);
props = await GetObjectOnFrame(pause_location["callFrames"][1], "complex");
Assert.Equal(4, props.Count());
CheckNumber(props, "A", 10);
await CheckString(props, "B", "xx");
await CheckString(props, "c", "20_xx");
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectLocalsInPreviousFramesDuringSteppingIn(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(debugger_test_loc, 111, 12);
// Will stop in InnerMethod
var wait_res = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_outer_method(); }, 1);",
debugger_test_loc, 111, 12, "InnerMethod",
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
CheckNumber(locals, "i", 5);
CheckNumber(locals, "j", 24);
await CheckString(locals, "foo_str", "foo");
await CheckObject(locals, "this", "Math.NestedInMath");
await Task.CompletedTask;
}
);
var this_props = await GetObjectOnFrame(wait_res["callFrames"][0], "this");
Assert.Equal(2, this_props.Count());
await CheckObject(this_props, "m", "Math");
await CheckValueType(this_props, "SimpleStructProperty", "Math.SimpleStruct");
var ss_props = await GetObjectOnLocals(this_props, "SimpleStructProperty");
var dt = new DateTime(2020, 1, 2, 3, 4, 5);
await CheckProps(ss_props, new
{
dt = TDateTime(dt),
gs = TValueType("Math.GenericStruct<System.DateTime>")
}, "ss_props");
// Check OuterMethod frame
var locals_m1 = await GetLocalsForFrame(wait_res["callFrames"][1], debugger_test_loc, 87, 8, "OuterMethod");
Assert.Equal(5, locals_m1.Count());
// FIXME: Failing test CheckNumber (locals_m1, "i", 5);
// FIXME: Failing test CheckString (locals_m1, "text", "Hello");
CheckNumber(locals_m1, "new_i", 0);
CheckNumber(locals_m1, "k", 0);
await CheckObject(locals_m1, "nim", "Math.NestedInMath");
// step back into OuterMethod
await StepAndCheck(StepKind.Over, debugger_test_loc, 91, 8, "OuterMethod", times: 6,
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
// FIXME: Failing test CheckNumber (locals_m1, "i", 5);
await CheckString(locals, "text", "Hello");
// FIXME: Failing test CheckNumber (locals, "new_i", 24);
CheckNumber(locals, "k", 19);
await CheckObject(locals, "nim", "Math.NestedInMath");
}
);
//await StepAndCheck (StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 81, 2, "OuterMethod", times: 2);
// step into InnerMethod2
await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 96, 4, "InnerMethod2",
locals_fn: async (locals) =>
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "s", "test string");
//out var: CheckNumber (locals, "k", 0);
CheckNumber(locals, "i", 24);
await Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 100, 4, "InnerMethod2", times: 4,
locals_fn: async (locals) =>
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "s", "test string");
// FIXME: Failing test CheckNumber (locals, "k", 34);
CheckNumber(locals, "i", 24);
await Task.CompletedTask;
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 92, 8, "OuterMethod", times: 1,
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckString(locals, "text", "Hello");
// FIXME: failing test CheckNumber (locals, "i", 5);
CheckNumber(locals, "new_i", 22);
CheckNumber(locals, "k", 34);
await CheckObject(locals, "nim", "Math.NestedInMath");
}
);
}
[Fact]
public async Task InspectLocalsDuringSteppingIn()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 86, 8);
await EvaluateAndCheck("window.setTimeout(function() { invoke_outer_method(); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 86, 8, "OuterMethod",
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckObject(locals, "nim", "Math.NestedInMath");
CheckNumber(locals, "i", 5);
CheckNumber(locals, "k", 0);
CheckNumber(locals, "new_i", 0);
await CheckString(locals, "text", null);
}
);
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 87, 8, "OuterMethod",
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckObject(locals, "nim", "Math.NestedInMath");
// FIXME: Failing test CheckNumber (locals, "i", 5);
CheckNumber(locals, "k", 0);
CheckNumber(locals, "new_i", 0);
await CheckString(locals, "text", "Hello");
await Task.CompletedTask;
}
);
// Step into InnerMethod
await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 105, 8, "InnerMethod");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 110, 12, "InnerMethod", times: 5,
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
CheckNumber(locals, "i", 5);
CheckNumber(locals, "j", 15);
await CheckString(locals, "foo_str", "foo");
await CheckObject(locals, "this", "Math.NestedInMath");
await Task.CompletedTask;
}
);
// Step back to OuterMethod
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 90, 8, "OuterMethod", times: 6,
locals_fn: async (locals) =>
{
Assert.Equal(5, locals.Count());
await CheckObject(locals, "nim", "Math.NestedInMath");
// FIXME: Failing test CheckNumber (locals, "i", 5);
CheckNumber(locals, "k", 0);
CheckNumber(locals, "new_i", 24);
await CheckString(locals, "text", "Hello");
}
);
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectLocalsInAsyncMethods(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(debugger_test_loc, 120, 12);
await SetBreakpoint(debugger_test_loc, 135, 12);
// Will stop in Asyncmethod0
var wait_res = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_async_method_with_await(); }, 1);",
debugger_test_loc, 120, 12, "MoveNext", //FIXME:
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
await CheckString(locals, "s", "string from js");
CheckNumber(locals, "i", 42);
await CheckString(locals, "local0", "value0");
await CheckObject(locals, "this", "Math.NestedInMath");
}
);
Console.WriteLine(wait_res);
#if false // Disabled for now, as we don't have proper async traces
var locals = await GetProperties(wait_res["callFrames"][2]["callFrameId"].Value<string>());
Assert.Equal(4, locals.Count());
await CheckString(locals, "ls", "string from jstest").ConfigureAwait(false);
CheckNumber(locals, "li", 52);
#endif
// TODO: previous frames have async machinery details, so no point checking that right now
var pause_loc = await SendCommandAndCheck(null, "Debugger.resume", debugger_test_loc, 135, 12, /*FIXME: "AsyncMethodNoReturn"*/ "MoveNext",
locals_fn: async (locals) =>
{
Assert.Equal(4, locals.Count());
await CheckString(locals, "str", "AsyncMethodNoReturn's local");
await CheckObject(locals, "this", "Math.NestedInMath");
//FIXME: check fields
await CheckValueType(locals, "ss", "Math.SimpleStruct");
await CheckArray(locals, "ss_arr", "Math.SimpleStruct[]", "Math.SimpleStruct[0]");
// TODO: struct fields
await Task.CompletedTask;
}
);
var this_props = await GetObjectOnFrame(pause_loc["callFrames"][0], "this");
Assert.Equal(2, this_props.Count());
await CheckObject(this_props, "m", "Math");
await CheckValueType(this_props, "SimpleStructProperty", "Math.SimpleStruct");
// TODO: Check `this` properties
}
[Theory]
[InlineData(false)]
[InlineData(true)]
public async Task InspectValueTypeMethodArgsWhileStepping(bool use_cfo)
{
UseCallFunctionOnBeforeGetProperties = use_cfo;
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
await SetBreakpoint(debugger_test_loc, 36, 12);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:TestStructsAsMethodArgs'); }, 1);",
debugger_test_loc, 36, 12, "MethodWithStructArgs");
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "label", "TestStructsAsMethodArgs#label");
await CheckValueType(locals, "ss_arg", "DebuggerTests.ValueTypesTest.SimpleStruct");
CheckNumber(locals, "x", 3);
}
var dt = new DateTime(2025, 6, 7, 8, 10, 11);
var ss_local_as_ss_arg = new
{
V = TGetter("V"),
str_member = TString("ss_local#SimpleStruct#string#0#SimpleStruct#str_member"),
dt = TDateTime(dt),
gs = TValueType("DebuggerTests.ValueTypesTest.GenericStruct<System.DateTime>"),
Kind = TEnum("System.DateTimeKind", "Local")
};
var ss_local_gs = new
{
StringField = TString("ss_local#SimpleStruct#string#0#SimpleStruct#gs#StringField"),
List = TObject("System.Collections.Generic.List<System.DateTime>", description: "Count = 1"),
Options = TEnum("DebuggerTests.Options", "Option1")
};
// Check ss_arg's properties
var ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][0], "ss_arg");
await CheckProps(ss_arg_props, ss_local_as_ss_arg, "ss_arg");
var res = await InvokeGetter(GetAndAssertObjectWithName(locals, "ss_arg"), "V");
await CheckValue(res.Value["result"], TNumber(0xDEADBEEF + (uint)dt.Month), "ss_arg#V");
{
// Check ss_local.gs
await CompareObjectPropertiesFor(ss_arg_props, "gs", ss_local_gs);
}
pause_location = await StepAndCheck(StepKind.Over, debugger_test_loc, 40, 8, "MethodWithStructArgs", times: 4,
locals_fn: async (l) => { /* non-null to make sure that locals get fetched */ await Task.CompletedTask; });
locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
{
Assert.Equal(3, locals.Count());
await CheckString(locals, "label", "TestStructsAsMethodArgs#label");
await CheckValueType(locals, "ss_arg", "DebuggerTests.ValueTypesTest.SimpleStruct");
CheckNumber(locals, "x", 3);
}
var ss_arg_updated = new
{
V = TGetter("V"),
str_member = TString("ValueTypesTest#MethodWithStructArgs#updated#ss_arg#str_member"),
dt = TDateTime(dt),
gs = TValueType("DebuggerTests.ValueTypesTest.GenericStruct<System.DateTime>"),
Kind = TEnum("System.DateTimeKind", "Utc")
};
ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][0], "ss_arg");
await CheckProps(ss_arg_props, ss_arg_updated, "ss_arg");
res = await InvokeGetter(GetAndAssertObjectWithName(locals, "ss_arg"), "V");
await CheckValue(res.Value["result"], TNumber(0xDEADBEEF + (uint)dt.Month), "ss_arg#V");
{
// Check ss_local.gs
await CompareObjectPropertiesFor(ss_arg_props, "gs", new
{
StringField = TString("ValueTypesTest#MethodWithStructArgs#updated#gs#StringField#3"),
List = TObject("System.Collections.Generic.List<System.DateTime>", description: "Count = 1"),
Options = TEnum("DebuggerTests.Options", "Option1")
});
}
// Check locals on previous frame, same as earlier in this test
ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][1], "ss_local");
await CheckProps(ss_arg_props, ss_local_as_ss_arg, "ss_local");
{
// Check ss_local.dt
await CheckDateTime(ss_arg_props, "dt", dt);
// Check ss_local.gs
var gs_props = await GetObjectOnLocals(ss_arg_props, "gs");
await CheckString(gs_props, "StringField", "ss_local#SimpleStruct#string#0#SimpleStruct#gs#StringField");
await CheckObject(gs_props, "List", "System.Collections.Generic.List<System.DateTime>", description: "Count = 1");
}
// ----------- Step back to the caller ---------
pause_location = await StepAndCheck(StepKind.Over, debugger_test_loc, 30, 12, "TestStructsAsMethodArgs",
times: 1, locals_fn: async (l) => { /* non-null to make sure that locals get fetched */ await Task.CompletedTask; });
locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckProps(locals, new
{
ss_local = TValueType("DebuggerTests.ValueTypesTest.SimpleStruct"),
ss_ret = TValueType("DebuggerTests.ValueTypesTest.SimpleStruct")
},
"locals#0");
ss_arg_props = await GetObjectOnFrame(pause_location["callFrames"][0], "ss_local");
await CheckProps(ss_arg_props, ss_local_as_ss_arg, "ss_local");
{
// Check ss_local.gs
await CompareObjectPropertiesFor(ss_arg_props, "gs", ss_local_gs, label: "ss_local_gs");
}
// FIXME: check ss_local.gs.List's members
}
[Fact]
public async Task CheckUpdatedValueTypeFieldsOnResume()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
var lines = new[] { 205, 208 };
await SetBreakpoint(debugger_test_loc, lines[0], 12);
await SetBreakpoint(debugger_test_loc, lines[1], 12);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:MethodUpdatingValueTypeMembers'); }, 1);",
debugger_test_loc, lines[0], 12, "MethodUpdatingValueTypeMembers");
await CheckLocals(pause_location, new DateTime(1, 2, 3, 4, 5, 6), new DateTime(4, 5, 6, 7, 8, 9));
// Resume
pause_location = await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", debugger_test_loc, lines[1], 12, "MethodUpdatingValueTypeMembers");
await CheckLocals(pause_location, new DateTime(9, 8, 7, 6, 5, 4), new DateTime(5, 1, 3, 7, 9, 10));
async Task CheckLocals(JToken pause_location, DateTime obj_dt, DateTime vt_dt)
{
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckProps(locals, new
{
obj = TObject("DebuggerTests.ClassForToStringTests"),
vt = TObject("DebuggerTests.StructForToStringTests")
}, "locals");
var obj_props = await GetObjectOnLocals(locals, "obj");
{
await CheckProps(obj_props, new
{
DT = TDateTime(obj_dt)
}, "locals#obj.DT", num_fields: 5);
}
var vt_props = await GetObjectOnLocals(locals, "vt");
{
await CheckProps(vt_props, new
{
DT = TDateTime(vt_dt)
}, "locals#obj.DT", num_fields: 5);
}
}
}
[Fact]
public async Task CheckUpdatedValueTypeLocalsOnResumeAsync()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
var lines = new[] { 214, 216 };
await SetBreakpoint(debugger_test_loc, lines[0], 12);
await SetBreakpoint(debugger_test_loc, lines[1], 12);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:MethodUpdatingValueTypeLocalsAsync'); }, 1);",
debugger_test_loc, lines[0], 12, "MoveNext");
var dt = new DateTime(1, 2, 3, 4, 5, 6);
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckDateTime(locals, "dt", dt);
// Resume
dt = new DateTime(9, 8, 7, 6, 5, 4);
pause_location = await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", debugger_test_loc, lines[1], 12, "MoveNext");
locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckDateTime(locals, "dt", dt);
}
[Fact]
public async Task CheckUpdatedVTArrayMembersOnResume()
{
var debugger_test_loc = "dotnet://debugger-test.dll/debugger-valuetypes-test.cs";
var lines = new[] { 225, 227 };
await SetBreakpoint(debugger_test_loc, lines[0], 12);
await SetBreakpoint(debugger_test_loc, lines[1], 12);
var dt = new DateTime(1, 2, 3, 4, 5, 6);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] DebuggerTests.ValueTypesTest:MethodUpdatingVTArrayMembers'); }, 1);",
debugger_test_loc, lines[0], 12, "MethodUpdatingVTArrayMembers");
await CheckArrayElements(pause_location, dt);
// Resume
dt = new DateTime(9, 8, 7, 6, 5, 4);
pause_location = await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", debugger_test_loc, lines[1], 12, "MethodUpdatingVTArrayMembers");
await CheckArrayElements(pause_location, dt);
async Task CheckArrayElements(JToken pause_location, DateTime dt)
{
var locals = await GetProperties(pause_location["callFrames"][0]["callFrameId"].Value<string>());
await CheckProps(locals, new
{
ssta = TArray("DebuggerTests.StructForToStringTests[]", "DebuggerTests.StructForToStringTests[1]")
}, "locals");
var ssta = await GetObjectOnLocals(locals, "ssta");
var sst0 = await GetObjectOnLocals(ssta, "0");
await CheckProps(sst0, new
{
DT = TDateTime(dt)
}, "dta [0]", num_fields: 5);
}
}
[Fact]
public async Task SteppingIntoMscorlib()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 83, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] Math:OuterMethod'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 83, 8,
"OuterMethod");
//make sure we're on the right bp
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
pause_location = await SendCommandAndCheck(null, $"Debugger.stepInto", null, -1, -1, null);
var top_frame = pause_location["callFrames"][0];
AssertEqual("WriteLine", top_frame["functionName"]?.Value<string>(), "Expected to be in WriteLine method");
var script_id = top_frame["functionLocation"]["scriptId"].Value<string>();
Assert.Matches("^dotnet://(mscorlib|System\\.Console)\\.dll/Console.cs", scripts[script_id]);
}
[Fact]
public async Task CreateGoodBreakpointAndHitAndRemoveAndDontHit()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
var bp2 = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 12, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); invoke_add()}, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd");
Assert.Equal("other", pause_location["reason"]?.Value<string>());
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
}
[Fact]
public async Task CreateGoodBreakpointAndHitAndRemoveTwice()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
var bp2 = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 12, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); invoke_add()}, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd");
Assert.Equal("other", pause_location["reason"]?.Value<string>());
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
}
[Fact]
public async Task CreateGoodBreakpointAndHitAndRemoveAndDontHitAndCreateAgainAndHit()
{
var bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
var bp2 = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 12, 8);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_add(); invoke_add(); invoke_add(); invoke_add()}, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 10, 8,
"IntAdd");
Assert.Equal("other", pause_location["reason"]?.Value<string>());
Assert.Equal(bp.Value["breakpointId"]?.ToString(), pause_location["hitBreakpoints"]?[0]?.Value<string>());
await RemoveBreakpoint(bp.Value["breakpointId"]?.ToString());
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 12, 8, "IntAdd");
bp = await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 10, 8);
await SendCommandAndCheck(JObject.FromObject(new { }), "Debugger.resume", "dotnet://debugger-test.dll/debugger-test.cs", 10, 8, "IntAdd");
}
// [Fact]
//https://github.com/dotnet/runtime/issues/42421
public async Task BreakAfterAwaitThenStepOverTillBackToCaller()
{
var bp = await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut2", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:TestAsyncStepOut'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 21, 12,
"MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-async-step.cs", 23, 12, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-async-step.cs", 24, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-async-step.cs", 15, 12, "MoveNext");
}
// [Fact]
//[ActiveIssue("https://github.com/dotnet/runtime/issues/42421")]
public async Task StepOutOfAsyncMethod()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut2", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:TestAsyncStepOut'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 21, 12,
"MoveNext");
await StepAndCheck(StepKind.Out, source_file, 15, 4, "TestAsyncStepOut");
}
[Fact]
public async Task ResumeOutOfAsyncMethodToAsyncCallerWithBreakpoint()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut2", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:TestAsyncStepOut'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 21, 12,
"MoveNext");
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "TestAsyncStepOut", 2);
await SendCommandAndCheck(null, "Debugger.resume", source_file, 16, 8, "MoveNext");
}
[Fact]
public async Task StepOutOfNonAsyncMethod()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "OtherMethod0", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:SimpleMethod'); }, 1);",
source_file, -1, -1,
"OtherMethod0");
await StepAndCheck(StepKind.Out, source_file, 29, 12, "SimpleMethod");
}
[Fact]
public async Task BreakOnAwaitThenStepOverToNextAwaitCall()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "MethodWithTwoAwaitsAsync", 2);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 53, 12,
"MoveNext");
await StepAndCheck(StepKind.Over, source_file, 54, 12, "MoveNext");
}
[Fact]
public async Task BreakOnAwaitThenStepOverToNextLine()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "StepOverTestAsync", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 46, 12,
"MoveNext");
// BUG: chrome: not able to show any bp line indicator
await StepAndCheck(StepKind.Over, source_file, 47, 12, "MoveNext");
}
[Fact]
public async Task BreakOnAwaitThenResumeToNextBreakpoint()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "StepOverTestAsync", 1);
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "StepOverTestAsync", 3);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 46, 12,
"MoveNext");
await StepAndCheck(StepKind.Resume, source_file, 48, 8, "MoveNext");
}
[Fact]
public async Task BreakOnAwaitThenResumeToNextBreakpointAfterSecondAwaitInSameMethod()
{
string source_file = "dotnet://debugger-test.dll/debugger-async-step.cs";
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "MethodWithTwoAwaitsAsync", 1);
await SetBreakpointInMethod("debugger-test.dll", "DebuggerTests.AsyncStepClass", "MethodWithTwoAwaitsAsync", 5);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method_async('[debugger-test] DebuggerTests.AsyncStepClass:StepOverTestAsync'); }, 1);",
"dotnet://debugger-test.dll/debugger-async-step.cs", 52, 12,
"MoveNext");
await StepAndCheck(StepKind.Resume, source_file, 56, 12, "MoveNext");
}
[Fact]
public async Task BreakOnMethodCalledFromHiddenLine()
{
await SetBreakpointInMethod("debugger-test.dll", "HiddenSequencePointTest", "StepOverHiddenSP2", 0);
var pause_location = await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 546, 4,
"StepOverHiddenSP2");
// Check previous frame
var top_frame = pause_location["callFrames"][1];
Assert.Equal("StepOverHiddenSP", top_frame["functionName"].Value<string>());
Assert.Contains("debugger-test.cs", top_frame["url"].Value<string>());
CheckLocation("dotnet://debugger-test.dll/debugger-test.cs", 537, 8, scripts, top_frame["location"]);
}
[Fact]
public async Task StepOverHiddenLinesShouldResumeAtNextAvailableLineInTheMethod()
{
string source_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(source_loc, 537, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 537, 8,
"StepOverHiddenSP");
await StepAndCheck(StepKind.Over, source_loc, 542, 8, "StepOverHiddenSP");
}
[Fact]
        public async Task StepOverHiddenLinesInMethodWithNoNextAvailableLineShouldResumeAtCallSite()
{
string source_loc = "dotnet://debugger-test.dll/debugger-test.cs";
await SetBreakpoint(source_loc, 552, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 552, 8,
"MethodWithHiddenLinesAtTheEnd");
await StepAndCheck(StepKind.Over, source_loc, 544, 4, "StepOverHiddenSP");
}
// [Fact]
// Issue: https://github.com/dotnet/runtime/issues/42704
async Task BreakpointOnHiddenLineShouldStopAtEarliestNextAvailableLine()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 539, 8);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 546, 4,
"StepOverHiddenSP2");
}
[Fact]
public async Task BreakpointOnHiddenLineOfMethodWithNoNextVisibleLineShouldNotPause()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 554, 12);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] HiddenSequencePointTest:StepOverHiddenSP'); }, 1);";
await cli.SendCommand($"Runtime.evaluate", JObject.FromObject(new { expression }), token);
Task pause_task = insp.WaitFor(Inspector.PAUSE);
Task t = await Task.WhenAny(pause_task, Task.Delay(2000));
Assert.True(t != pause_task, "Debugger unexpectedly paused");
}
[Fact]
public async Task SimpleStep_RegressionTest_49141()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 674, 0);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);";
await EvaluateAndCheck(
expression,
"dotnet://debugger-test.dll/debugger-test.cs", 674, 12,
"Bart");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 677, 8, "Bart");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 678, 4, "Bart");
}
[Fact]
public async Task StepAndEvaluateExpression()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 682, 0);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs", 682, 8,
"RunBart");
var pause_location = await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 671, 4, "Bart");
var id = pause_location["callFrames"][0]["callFrameId"].Value<string>();
await EvaluateOnCallFrameAndCheck(id, ("this.Bar", TString("Same of something")));
pause_location = await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 673, 8, "Bart");
id = pause_location["callFrames"][0]["callFrameId"].Value<string>();
await EvaluateOnCallFrameAndCheck(id, ("this.Bar", TString("Same of something")));
}
[Fact]
public async Task StepOverWithMoreThanOneCommandInSameLine()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 693, 0);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);";
await EvaluateAndCheck(
expression,
"dotnet://debugger-test.dll/debugger-test.cs", 693, 8,
"OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 694, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 696, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 699, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 701, 8, "OtherBar");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 702, 4, "OtherBar");
}
[Fact]
public async Task StepOverWithMoreThanOneCommandInSameLineAsync()
{
await SetBreakpoint("dotnet://debugger-test.dll/debugger-test.cs", 710, 0);
string expression = "window.setTimeout(function() { invoke_static_method ('[debugger-test] Foo:RunBart'); }, 1);";
await EvaluateAndCheck(
expression,
"dotnet://debugger-test.dll/debugger-test.cs", 710, 8,
"MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 711, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 713, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 716, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 718, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 719, 8, "MoveNext");
await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 720, 4, "MoveNext");
}
[Fact]
public async Task CheckResetFrameNumberForEachStep()
{
var bp_conditional = await SetBreakpointInMethod("debugger-test.dll", "SteppingInto", "MethodToStep", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method('[debugger-test] SteppingInto:MethodToStep'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs",
bp_conditional.Value["locations"][0]["lineNumber"].Value<int>(),
bp_conditional.Value["locations"][0]["columnNumber"].Value<int>(),
"MethodToStep"
);
var pause_location = await StepAndCheck(StepKind.Into, "dotnet://debugger-test.dll/debugger-test.cs", 799, 4, "Increment");
pause_location = await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 800, 8, "Increment");
            Assert.Equal("dotnet:scope:1", pause_location["callFrames"][0]["callFrameId"].Value<string>());
            pause_location = await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 801, 8, "Increment");
            Assert.Equal("dotnet:scope:1", pause_location["callFrames"][0]["callFrameId"].Value<string>());
            pause_location = await StepAndCheck(StepKind.Over, "dotnet://debugger-test.dll/debugger-test.cs", 806, 8, "Increment");
            Assert.Equal("dotnet:scope:1", pause_location["callFrames"][0]["callFrameId"].Value<string>());
}
[Fact]
public async Task DebuggerHiddenIgnoreStepInto()
{
var pause_location = await SetBreakpointInMethod("debugger-test.dll", "DebuggerAttribute", "RunDebuggerHidden", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method('[debugger-test] DebuggerAttribute:RunDebuggerHidden'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs",
pause_location.Value["locations"][0]["lineNumber"].Value<int>(),
pause_location.Value["locations"][0]["columnNumber"].Value<int>(),
"RunDebuggerHidden"
);
var step_into = await SendCommandAndCheck(null, $"Debugger.stepInto", null, -1, -1, null);
Assert.Equal(
step_into["callFrames"][0]["location"]["lineNumber"].Value<int>(),
pause_location.Value["locations"][0]["lineNumber"].Value<int>() + 1
);
}
[Theory]
[InlineData("Debugger.stepInto")]
[InlineData("Debugger.stepOver")]
public async Task DebuggerHiddenIgnoreStepUserBreakpoint(string steppingFunction)
{
var pause_location = await SetBreakpointInMethod("debugger-test.dll", "DebuggerAttribute", "RunDebuggerHidden", 1);
await EvaluateAndCheck(
"window.setTimeout(function() { invoke_static_method('[debugger-test] DebuggerAttribute:RunDebuggerHidden'); }, 1);",
"dotnet://debugger-test.dll/debugger-test.cs",
pause_location.Value["locations"][0]["lineNumber"].Value<int>(),
pause_location.Value["locations"][0]["columnNumber"].Value<int>(),
"RunDebuggerHidden"
);
// stepOver HiddenMethod:
var step_into1 = await SendCommandAndCheck(null, steppingFunction, null, -1, -1, null);
Assert.Equal(
pause_location.Value["locations"][0]["lineNumber"].Value<int>() + 1,
step_into1["callFrames"][0]["location"]["lineNumber"].Value<int>()
);
// freeze on HiddenMethodUserBreak:
var step_into2 = await SendCommandAndCheck(null, steppingFunction, null, -1, -1, null);
Assert.Equal(
pause_location.Value["locations"][0]["lineNumber"].Value<int>() + 1,
step_into2["callFrames"][0]["location"]["lineNumber"].Value<int>()
);
}
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, SIMD types are always supported, but the code base sometimes gated behavior on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that does not work on Arm64, where SIMD features are always needed for ABI handling. All such usages now use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
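As a rough illustration of the intent described above (a minimal sketch, not the actual JIT sources; `CompilerConfig`, the `TARGET_ARM64` define, and the free function below are names assumed for this example):
```cpp
#include <cstdio>

// Illustrative stand-in for the JIT's configuration state (not real runtime code).
struct CompilerConfig
{
    bool featureSIMD; // mirrors COMPlus_FeatureSIMD; a user can set this to false
};

// Sketch of the idea behind supportSIMDTypes(): on Arm64 the ABI always needs
// SIMD type handling, so a user-configurable flag must not be able to turn it off.
bool supportSIMDTypes(const CompilerConfig& cfg)
{
#if defined(TARGET_ARM64)
    (void)cfg;   // the flag is intentionally ignored on Arm64
    return true; // SIMD types are always required for ABI handling
#else
    return cfg.featureSIMD;
#endif
}

int main()
{
    CompilerConfig cfg{false}; // simulate COMPlus_FeatureSIMD=0
    std::printf("supportSIMDTypes: %s\n", supportSIMDTypes(cfg) ? "true" : "false");
    return 0;
}
```
On non-Arm64 targets the sketch simply defers to the flag; per the description above, the actual change routed the existing `featureSIMD` checks through `supportSIMDTypes()`.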
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, SIMD types are always supported, but the code base sometimes gated behavior on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0`; that does not work on Arm64, where SIMD features are always needed for ABI handling. All such usages now use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/coreclr/tools/Common/TypeSystem/IL/HelperExtensions.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using Internal.TypeSystem;
using Internal.IL.Stubs;
using Debug = System.Diagnostics.Debug;
namespace Internal.IL
{
internal static class HelperExtensions
{
private const string HelperTypesNamespace = "Internal.Runtime.CompilerHelpers";
public static MetadataType GetHelperType(this TypeSystemContext context, string name)
{
MetadataType helperType = context.SystemModule.GetKnownType(HelperTypesNamespace, name);
return helperType;
}
public static MetadataType GetOptionalHelperType(this TypeSystemContext context, string name)
{
MetadataType helperType = context.SystemModule.GetType(HelperTypesNamespace, name, throwIfNotFound: false);
return helperType;
}
public static MethodDesc GetHelperEntryPoint(this TypeSystemContext context, string typeName, string methodName)
{
MetadataType helperType = context.GetHelperType(typeName);
MethodDesc helperMethod = helperType.GetKnownMethod(methodName, null);
return helperMethod;
}
public static MethodDesc GetOptionalHelperEntryPoint(this TypeSystemContext context, string typeName, string methodName)
{
MetadataType helperType = context.GetOptionalHelperType(typeName);
MethodDesc helperMethod = helperType?.GetMethod(methodName, null);
return helperMethod;
}
/// <summary>
/// Emits a call to a throw helper. Use this to emit calls to static parameterless methods that don't return.
/// The advantage of using this extension method is that you don't have to deal with what code to emit after
/// the call (e.g. do you need to make sure the stack is balanced?).
/// </summary>
public static void EmitCallThrowHelper(this ILCodeStream codeStream, ILEmitter emitter, MethodDesc method)
{
Debug.Assert(method.Signature.Length == 0 && method.Signature.IsStatic);
// Emit a call followed by a branch to the call.
// We are emitting this instead of emitting a tight loop that jumps to itself
// so that the JIT doesn't generate extra GC checks within the loop.
ILCodeLabel label = emitter.NewCodeLabel();
codeStream.EmitLabel(label);
codeStream.Emit(ILOpcode.call, emitter.NewToken(method));
codeStream.Emit(ILOpcode.br, label);
}
/// <summary>
/// Retrieves a method on <paramref name="type"/> that is well known to the compiler.
/// Throws an exception if the method doesn't exist.
/// </summary>
public static MethodDesc GetKnownMethod(this TypeDesc type, string name, MethodSignature signature)
{
MethodDesc method = type.GetMethod(name, signature);
if (method == null)
{
throw new InvalidOperationException(String.Format("Expected method '{0}' not found on type '{1}'", name, type));
}
return method;
}
/// <summary>
/// Retrieves a field on <paramref name="type"/> that is well known to the compiler.
/// Throws an exception if the field doesn't exist.
/// </summary>
public static FieldDesc GetKnownField(this TypeDesc type, string name)
{
FieldDesc field = type.GetField(name);
if (field == null)
{
throw new InvalidOperationException(String.Format("Expected field '{0}' not found on type '{1}'", name, type));
}
return field;
}
/// <summary>
/// Retrieves a nested type on <paramref name="type"/> that is well known to the compiler.
/// Throws an exception if the nested type doesn't exist.
/// </summary>
public static MetadataType GetKnownNestedType(this MetadataType type, string name)
{
MetadataType nestedType = type.GetNestedType(name);
if (nestedType == null)
{
throw new InvalidOperationException(String.Format("Expected type '{0}' not found on type '{1}'", name, type));
}
return nestedType;
}
/// <summary>
        /// Retrieves a namespace type in <paramref name="module"/> that is well known to the compiler.
/// Throws an exception if the type doesn't exist.
/// </summary>
public static MetadataType GetKnownType(this ModuleDesc module, string @namespace, string name)
{
MetadataType type = module.GetType(@namespace, name, throwIfNotFound: false);
if (type == null)
{
throw new InvalidOperationException(
String.Format("Expected type '{0}' not found in module '{1}'",
@namespace.Length > 0 ? String.Concat(@namespace, ".", name) : name,
module));
}
return type;
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System;
using Internal.TypeSystem;
using Internal.IL.Stubs;
using Debug = System.Diagnostics.Debug;
namespace Internal.IL
{
internal static class HelperExtensions
{
private const string HelperTypesNamespace = "Internal.Runtime.CompilerHelpers";
public static MetadataType GetHelperType(this TypeSystemContext context, string name)
{
MetadataType helperType = context.SystemModule.GetKnownType(HelperTypesNamespace, name);
return helperType;
}
public static MetadataType GetOptionalHelperType(this TypeSystemContext context, string name)
{
MetadataType helperType = context.SystemModule.GetType(HelperTypesNamespace, name, throwIfNotFound: false);
return helperType;
}
public static MethodDesc GetHelperEntryPoint(this TypeSystemContext context, string typeName, string methodName)
{
MetadataType helperType = context.GetHelperType(typeName);
MethodDesc helperMethod = helperType.GetKnownMethod(methodName, null);
return helperMethod;
}
public static MethodDesc GetOptionalHelperEntryPoint(this TypeSystemContext context, string typeName, string methodName)
{
MetadataType helperType = context.GetOptionalHelperType(typeName);
MethodDesc helperMethod = helperType?.GetMethod(methodName, null);
return helperMethod;
}
/// <summary>
/// Emits a call to a throw helper. Use this to emit calls to static parameterless methods that don't return.
/// The advantage of using this extension method is that you don't have to deal with what code to emit after
/// the call (e.g. do you need to make sure the stack is balanced?).
/// </summary>
public static void EmitCallThrowHelper(this ILCodeStream codeStream, ILEmitter emitter, MethodDesc method)
{
Debug.Assert(method.Signature.Length == 0 && method.Signature.IsStatic);
// Emit a call followed by a branch to the call.
// We are emitting this instead of emitting a tight loop that jumps to itself
// so that the JIT doesn't generate extra GC checks within the loop.
ILCodeLabel label = emitter.NewCodeLabel();
codeStream.EmitLabel(label);
codeStream.Emit(ILOpcode.call, emitter.NewToken(method));
codeStream.Emit(ILOpcode.br, label);
}
/// <summary>
/// Retrieves a method on <paramref name="type"/> that is well known to the compiler.
/// Throws an exception if the method doesn't exist.
/// </summary>
public static MethodDesc GetKnownMethod(this TypeDesc type, string name, MethodSignature signature)
{
MethodDesc method = type.GetMethod(name, signature);
if (method == null)
{
throw new InvalidOperationException(String.Format("Expected method '{0}' not found on type '{1}'", name, type));
}
return method;
}
/// <summary>
/// Retrieves a field on <paramref name="type"/> that is well known to the compiler.
/// Throws an exception if the field doesn't exist.
/// </summary>
public static FieldDesc GetKnownField(this TypeDesc type, string name)
{
FieldDesc field = type.GetField(name);
if (field == null)
{
throw new InvalidOperationException(String.Format("Expected field '{0}' not found on type '{1}'", name, type));
}
return field;
}
/// <summary>
/// Retrieves a nested type on <paramref name="type"/> that is well known to the compiler.
/// Throws an exception if the nested type doesn't exist.
/// </summary>
public static MetadataType GetKnownNestedType(this MetadataType type, string name)
{
MetadataType nestedType = type.GetNestedType(name);
if (nestedType == null)
{
throw new InvalidOperationException(String.Format("Expected type '{0}' not found on type '{1}'", name, type));
}
return nestedType;
}
/// <summary>
/// Retrieves a namespace type in <paramref name= "module" /> that is well known to the compiler.
/// Throws an exception if the type doesn't exist.
/// </summary>
public static MetadataType GetKnownType(this ModuleDesc module, string @namespace, string name)
{
MetadataType type = module.GetType(@namespace, name, throwIfNotFound: false);
if (type == null)
{
throw new InvalidOperationException(
String.Format("Expected type '{0}' not found in module '{1}'",
@namespace.Length > 0 ? String.Concat(@namespace, ".", name) : name,
module));
}
return type;
}
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead (a minimal sketch of the pattern follows the reference links below).
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
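For illustration only, a minimal, self-contained C++ sketch of the substitution described above; the `TARGET_ARM64` define, the `featureSIMD` variable, and the `supportSIMDTypes()` helper here are hypothetical stand-ins, not the actual dotnet/runtime JIT sources.

#include <cstdio>

#define TARGET_ARM64 1            // assumption: compiling for an Arm64 target
static bool featureSIMD = false;  // e.g. cleared because COMPlus_FeatureSIMD=0

// Stand-in for the real query: Arm64 always reports SIMD support,
// because the ABI requires SIMD types regardless of the switch.
static bool supportSIMDTypes()
{
#if defined(TARGET_ARM64)
    return true;
#else
    return featureSIMD;
#endif
}

int main()
{
    if (featureSIMD)              // old pattern: silently skipped on Arm64
        std::puts("featureSIMD path");

    if (supportSIMDTypes())       // new pattern: still taken on Arm64
        std::puts("supportSIMDTypes() path");

    return 0;
}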
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/jit64/valuetypes/nullable/box-unbox/generics/box-unbox-generics030.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="box-unbox-generics030.cs" />
<Compile Include="..\structdef.cs" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>PdbOnly</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="box-unbox-generics030.cs" />
<Compile Include="..\structdef.cs" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/And.Vector128.Int32.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void And_Vector128_Int32()
{
var test = new SimpleBinaryOpTest__And_Vector128_Int32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__And_Vector128_Int32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector128<Int32> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__And_Vector128_Int32 testClass)
{
var result = AdvSimd.And(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__And_Vector128_Int32 testClass)
{
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector128<Int32> _clsVar2;
private Vector128<Int32> _fld1;
private Vector128<Int32> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__And_Vector128_Int32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public SimpleBinaryOpTest__And_Vector128_Int32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.And(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.And), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.And), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.And(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int32>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pClsVar1)),
AdvSimd.LoadVector128((Int32*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var result = AdvSimd.And(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr));
var result = AdvSimd.And(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__And_Vector128_Int32();
var result = AdvSimd.And(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__And_Vector128_Int32();
fixed (Vector128<Int32>* pFld1 = &test._fld1)
fixed (Vector128<Int32>* pFld2 = &test._fld2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.And(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.And(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(&test._fld1)),
AdvSimd.LoadVector128((Int32*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int32[] left, Int32[] right, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.And(left[i], right[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.And)}<Int32>(Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void And_Vector128_Int32()
{
var test = new SimpleBinaryOpTest__And_Vector128_Int32();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleBinaryOpTest__And_Vector128_Int32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int32[] inArray2, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int32, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector128<Int32> _fld2;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
return testStruct;
}
public void RunStructFldScenario(SimpleBinaryOpTest__And_Vector128_Int32 testClass)
{
var result = AdvSimd.And(_fld1, _fld2);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleBinaryOpTest__And_Vector128_Int32 testClass)
{
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int32[] _data2 = new Int32[Op2ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector128<Int32> _clsVar2;
private Vector128<Int32> _fld1;
private Vector128<Int32> _fld2;
private DataTable _dataTable;
static SimpleBinaryOpTest__And_Vector128_Int32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
}
public SimpleBinaryOpTest__And_Vector128_Int32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld2), ref Unsafe.As<Int32, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt32(); }
_dataTable = new DataTable(_data1, _data2, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.And(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.And), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.And), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<Int32>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.And(
_clsVar1,
_clsVar2
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector128<Int32>* pClsVar2 = &_clsVar2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pClsVar1)),
AdvSimd.LoadVector128((Int32*)(pClsVar2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray2Ptr);
var result = AdvSimd.And(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray2Ptr));
var result = AdvSimd.And(op1, op2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleBinaryOpTest__And_Vector128_Int32();
var result = AdvSimd.And(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleBinaryOpTest__And_Vector128_Int32();
fixed (Vector128<Int32>* pFld1 = &test._fld1)
fixed (Vector128<Int32>* pFld2 = &test._fld2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.And(_fld1, _fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector128<Int32>* pFld2 = &_fld2)
{
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector128((Int32*)(pFld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.And(test._fld1, test._fld2);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.And(
AdvSimd.LoadVector128((Int32*)(&test._fld1)),
AdvSimd.LoadVector128((Int32*)(&test._fld2))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> op1, Vector128<Int32> op2, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), op2);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int32[] inArray2 = new Int32[Op2ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, outArray, method);
}
private void ValidateResult(Int32[] left, Int32[] right, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.And(left[i], right[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.And)}<Int32>(Vector128<Int32>, Vector128<Int32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})");
TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/mono/mono/tests/recursive-generics.2.cs
|
using System;
public class GenA<T> {};
public class GenB<T> : GenA<GenB<GenB<T>>> {};
public class GenC<T> {
public object newA () {
return new GenA<T> ();
}
}
public class GenD<T> : GenC<GenD<GenD<T>>> {};
public class main {
public static int Main () {
GenB<string> gb = new GenB<string> ();
GenD<string> gd = new GenD<string> ();
gd.newA ();
return 0;
}
}
|
using System;
public class GenA<T> {};
public class GenB<T> : GenA<GenB<GenB<T>>> {};
public class GenC<T> {
public object newA () {
return new GenA<T> ();
}
}
public class GenD<T> : GenC<GenD<GenD<T>>> {};
public class main {
public static int Main () {
GenB<string> gb = new GenB<string> ();
GenD<string> gd = new GenD<string> ();
gd.newA ();
return 0;
}
}
| -1 |
dotnet/runtime
| 66,411 |
Arm64: Always use SIMD features
|
On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
kunalspathak
| 2022-03-09T21:00:00Z | 2022-03-12T04:35:06Z |
4bcc7998f03f9e8c9d008d56e997d1c9d935e3a7
|
6e26872d0a282aa71ea792c3550a3cb0e8bf4e71
|
Arm64: Always use SIMD features. On Arm64, we always support SIMD types, but the code base was sometimes relying on the `featureSIMD` flag, which can be `false` when `COMPlus_FeatureSIMD=0` is set and so does not work on Arm64. On Arm64, we always need SIMD features to support ABI handling, so all such usages were changed to use `supportSIMDTypes()` instead.
Will fix https://github.com/dotnet/runtime/issues/64972
Thanks @tannergooding for the references:
- https://github.com/dotnet/runtime/issues/66206
- https://github.com/dotnet/runtime/issues/11701
- https://github.com/dotnet/runtime/issues/9473.
|
./src/tests/JIT/Directed/coverage/oldtests/ldsshrstsfld_il_d.ilproj
|
<Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="ldsshrstsfld.il" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk.IL">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType>Full</DebugType>
</PropertyGroup>
<ItemGroup>
<Compile Include="ldsshrstsfld.il" />
</ItemGroup>
</Project>
| -1 |
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4.
|
./src/mono/mono.proj
|
<Project Sdk="Microsoft.Build.Traversal" DefaultTargets="Build">
<!--
Build properties:
- MonoForceInterpreter - enable the interpreter
- MonoEnableLLVM - enable LLVM
- MonoLLVMDir - [optional] the directory where LLVM is located
- MonoAOTEnableLLVM - enable LLVM for an AOT-only Mono
- MonoAOTLLVMDir - [optional] the directory where LLVM is located, for an AOT-only Mono
- MonoVerboseBuild - enable verbose build
- MonoThreadSuspend - coop,hybrid,preemptive - default thread suspend mode
-->
<PropertyGroup>
<MonoCrossDir Condition="'$(MonoCrossDir)' == '' and '$(ROOTFS_DIR)' != ''">$(ROOTFS_DIR)</MonoCrossDir>
<MonoForceInterpreter Condition="'$(MonoForceInterpreter)' == ''">false</MonoForceInterpreter>
<ScriptExt Condition="'$(HostOS)' == 'windows'">.cmd</ScriptExt>
<ScriptExt Condition="'$(HostOS)' != 'windows'">.sh</ScriptExt>
<EscapedQuoteW Condition="'$(HostOS)' == 'windows'">\"</EscapedQuoteW>
<PythonCmd Condition="'$(HostOS)' != 'windows'">python3</PythonCmd>
<PythonCmd Condition="'$(HostOS)' == 'windows'">python</PythonCmd>
<CoreClrLibName>coreclr</CoreClrLibName>
<CoreClrFileName>$(LibPrefix)$(CoreClrLibName)$(LibSuffix)</CoreClrFileName>
<MonoLibName>monosgen-2.0</MonoLibName>
<MonoSharedLibName Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsAndroid)' == 'true' or '$(TargetsBrowser)' == 'true'">$(MonoLibName)</MonoSharedLibName>
<MonoSharedLibName Condition="'$(MonoSharedLibName)' == ''">$(CoreClrLibName)</MonoSharedLibName>
<MonoSharedLibFileName>$(LibPrefix)$(MonoSharedLibName)$(LibSuffix)</MonoSharedLibFileName>
<MonoStaticLibFileName>$(LibPrefix)$(MonoLibName)$(StaticLibSuffix)</MonoStaticLibFileName>
<MonoFileName Condition="'$(TargetsBrowser)' == 'true'">$(MonoStaticLibFileName)</MonoFileName>
<MonoFileName Condition="'$(MonoFileName)' == ''">$(MonoSharedLibFileName)</MonoFileName>
<MonoAotCrossFileName>mono-aot-cross$(ExeSuffix)</MonoAotCrossFileName>
<MonoAotCrossPdbFileName>mono-aot-cross.pdb</MonoAotCrossPdbFileName>
<CoreClrTestConfig Condition="'$(CoreClrTestConfig)' == ''">$(Configuration)</CoreClrTestConfig>
<LibrariesTestConfig Condition="'$(LibrariesTestConfig)' == ''">$(Configuration)</LibrariesTestConfig>
<CoreClrTestCoreRoot>$([MSBuild]::NormalizeDirectory('$(ArtifactsDir)', 'tests', 'coreclr', '$(TargetOS).$(Platform).$(CoreClrTestConfig)', 'Tests', 'Core_Root'))</CoreClrTestCoreRoot>
<LibrariesTesthostRoot>$([MSBuild]::NormalizeDirectory('$(ArtifactsDir)', 'bin', 'testhost', '$(NetCoreAppCurrent)-$(TargetOS)-$(LibrariesTestConfig)-$(Platform)'))</LibrariesTesthostRoot>
<LibrariesTesthostRuntimeDir>$([MSBuild]::NormalizeDirectory('$(LibrariesTesthostRoot)', 'shared', 'Microsoft.NETCore.App', '$(ProductVersion)'))</LibrariesTesthostRuntimeDir>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsiOS)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetstvOS)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsMacCatalyst)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsBrowser)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsAndroid)' == 'true'">true</BuildMonoAOTCrossCompiler>
<MonoObjCrossDir>$([MSBuild]::NormalizeDirectory('$(MonoObjDir)', 'cross'))</MonoObjCrossDir>
<CrossConfigH Condition="'$(BuildMonoAOTCrossCompiler)' == 'true'">$([MSBuild]::NormalizePath('$(MonoObjCrossDir)', 'config.h'))</CrossConfigH>
<MonoBundleLLVMOptimizer Condition="'$(MonoEnableLLVM)' == 'true'">true</MonoBundleLLVMOptimizer>
<MonoAOTBundleLLVMOptimizer Condition="'$(MonoAOTEnableLLVM)' == 'true' and '$(TargetsBrowser)' != 'true'">true</MonoAOTBundleLLVMOptimizer>
<MonoCCompiler>$(Compiler)</MonoCCompiler>
<MonoCCompiler Condition="'$(MonoCCompiler)' == ''">clang</MonoCCompiler>
<_CompilerTargetArch Condition="'$(RealTargetArchitecture)' == ''">$(Platform)</_CompilerTargetArch>
<_CompilerTargetArch Condition="'$(RealTargetArchitecture)' != ''">$(RealTargetArchitecture)</_CompilerTargetArch>
<RepositoryEngineeringCommonDir>$([MSBuild]::NormalizeDirectory('$(RepositoryEngineeringDir)', 'common'))</RepositoryEngineeringCommonDir>
<CrossToolchainFile>$([MSBuild]::NormalizePath('$(RepositoryEngineeringCommonDir)', 'cross', 'toolchain.cmake'))</CrossToolchainFile>
</PropertyGroup>
<!-- default thread suspend for specific platforms -->
<PropertyGroup>
<MonoThreadSuspend Condition="'$(TargetswatchOS)' == 'true' and '$(MonoThreadSuspend)' == ''">coop</MonoThreadSuspend>
<!-- wasm isn't really preemptive, but we don't want safepoints -->
<MonoThreadSuspend Condition="'$(TargetsBrowser)' == 'true' and '$(MonoThreadSuspend)' == ''">preemptive</MonoThreadSuspend>
<!-- all other platforms -->
<MonoThreadSuspend Condition="'$(MonoThreadSuspend)' == ''">hybrid</MonoThreadSuspend>
</PropertyGroup>
<!-- How to build runtime components? Static or dynamic. -->
<PropertyGroup>
<MonoComponentsStatic Condition="'$(TargetsBrowser)' == 'true' and '$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true' and '$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true' and '$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true' and '$(MonoComponentsStatic)' == ''">false</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true' and '$(MonoComponentsStatic)' == ''">false</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetsAndroid)' == 'true' and '$(MonoComponentsStatic)' == ''">false</MonoComponentsStatic>
<!-- by default, do dynamic components -->
<!-- TODO: Change to dynamic as default once package/deploy is fixed for all targets -->
<MonoComponentsStatic Condition="'$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
</PropertyGroup>
<ItemGroup Condition="'$(TargetsBrowser)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">
<PackageReference Include="Microsoft.NETCore.Runtime.ICU.Transport" PrivateAssets="all" Version="$(MicrosoftNETCoreRuntimeICUTransportVersion)" GeneratePathProperty="true" />
</ItemGroup>
<!-- CI specific build options -->
<ItemGroup Condition="'$(ContinuousIntegrationBuild)' == 'true' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsBrowser)' == 'true' or '$(Targetsillumos)' == 'true')">
<_MonoCMakeArgs Include="-DENABLE_WERROR=1"/>
</ItemGroup>
<!-- Sanity checks -->
<Target Name="CheckEnv">
<Error Condition="'$(TargetstvOSSimulator)' != 'true' and '$(TargetstvOS)' == 'true' and '$(Platform)' != 'arm64'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetstvOSSimulator)' == 'true' and '$(TargetstvOS)' == 'true' and '$(Platform)' != 'x64' and '$(Platform)' != 'arm64'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetsiOSSimulator)' != 'true' and '$(TargetsiOS)' == 'true' and '$(Platform)' != 'arm64' and '$(Platform)' != 'arm'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetsiOSSimulator)' == 'true' and '$(TargetsiOS)' == 'true' and '$(Platform)' != 'x64' and '$(Platform)' != 'x86' and '$(Platform)' != 'arm64'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="('$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true') and !$([MSBuild]::IsOSPlatform('OSX'))" Text="Error: $(TargetOS) can only be built on macOS." />
<Error Condition="'$(TargetsAndroid)' == 'true' and '$(Platform)' != 'x64' and '$(Platform)' != 'x86' and '$(Platform)' != 'arm64' and '$(Platform)' != 'arm'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetsBrowser)' == 'true' and '$(EMSDK_PATH)' == '' and '$(SkipMonoCrossJitConfigure)' != 'true'" Text="The EMSDK_PATH environment variable should be set pointing to the emscripten SDK root dir."/>
<Error Condition="'$(TargetsAndroid)' == 'true' and '$(ANDROID_NDK_ROOT)' == '' and '$(SkipMonoCrossJitConfigure)' != 'true'" Text="Error: You need to set the ANDROID_NDK_ROOT environment variable pointing to the Android NDK root." />
<Error Condition="'$(HostOS)' == 'windows' and ('$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true')" Text="Error: Mono runtime for $(TargetOS) can't be built on Windows." />
<!-- check if Ninja is available and default to it on Unix platforms -->
<Exec Condition="'$(HostOS)' != 'windows' and '$(Ninja)' == ''" Command="which ninja" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" >
<Output TaskParameter="ExitCode" PropertyName="_MonoFindNinjaExitCode"/>
</Exec>
<PropertyGroup>
<_MonoUseNinja Condition="'$(Ninja)' == 'true' or '$(_MonoFindNinjaExitCode)' == '0' or ('$(HostOS)' == 'windows' and '$(Ninja)' == '')">true</_MonoUseNinja>
</PropertyGroup>
<Exec Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows'" Command="call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && cmake --version" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" >
<Output TaskParameter="ExitCode" PropertyName="_MonoFindCmakeExitCode"/>
</Exec>
<Error Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows' and '$(_MonoFindCmakeExitCode)' != '0' and '$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Text="cmake tool is required to build wasm on windows" />
<Exec Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows'" Command="call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && ninja --version" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" >
<Output TaskParameter="ExitCode" PropertyName="_MonoFindNinjaExitCode"/>
</Exec>
<Error Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows' and '$(_MonoFindNinjaExitCode)' != '0' and '$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Text="ninja tool is required to build wasm on windows" />
</Target>
<Target Name="GetXcodeDir" Condition="$([MSBuild]::IsOSPlatform('OSX')) and '$(XcodeDir)' == ''">
<Exec Command="xcode-select -p" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" ConsoleToMsBuild="true">
<Output TaskParameter="ExitCode" PropertyName="_MonoGetXcodeExitCode"/>
<Output TaskParameter="ConsoleOutput" PropertyName="_MonoGetXcodeConsoleOutput"/>
</Exec>
<PropertyGroup>
<XcodeDir Condition="'$(_MonoGetXcodeExitCode)' == '0'">$(_MonoGetXcodeConsoleOutput)</XcodeDir>
<XcodeDir Condition="'$(XcodeDir)' == ''">/Applications/Xcode.app/Contents/Developer</XcodeDir>
</PropertyGroup>
</Target>
<!-- Sets up emscripten if you don't have the EMSDK_PATH env variable set -->
<Target Name="ProvisionEmscripten"
Condition="'$(ShouldProvisionEmscripten)' == 'true' and '$(SkipMonoCrossJitConfigure)' != 'true'">
<ReadLinesFromFile File="$(MSBuildThisFileDirectory)\wasm\emscripten-version.txt">
<Output TaskParameter="Lines" ItemName="_VersionLines" />
</ReadLinesFromFile>
<PropertyGroup>
<EmsdkExt Condition="'$(HostOS)' != 'windows'">.sh</EmsdkExt>
<EmsdkExt Condition="'$(HostOS)' == 'windows'">.ps1</EmsdkExt>
<EMSDK_PATH>$(ProvisionEmscriptenDir)</EMSDK_PATH>
<WasmLocalPath>$([MSBuild]::NormalizeDirectory('$(MSBuildThisFileDirectory)', 'wasm'))</WasmLocalPath>
<EmsdkLocalPath>emsdk</EmsdkLocalPath>
<EmscriptenVersion>%(_VersionLines.Identity)</EmscriptenVersion>
<InstallCmd>./emsdk$(EmsdkExt) install $(EmscriptenVersion)</InstallCmd>
<ActivateCmd>./emsdk$(EmsdkExt) activate $(EmscriptenVersion)</ActivateCmd>
<InstallCmd Condition="'$(HostOS)' == 'windows'">powershell -NonInteractive -command "& $(InstallCmd); Exit $LastExitCode "</InstallCmd>
<ActivateCmd Condition="'$(HostOS)' == 'windows'">powershell -NonInteractive -command "& $(ActivateCmd); Exit $LastExitCode "</ActivateCmd>
</PropertyGroup>
<RemoveDir Directories="$(EMSDK_PATH)" />
<Exec Command="git clone https://github.com/emscripten-core/emsdk.git emsdk"
WorkingDirectory="$(WasmLocalPath)"
IgnoreStandardErrorWarningFormat="true" />
<Exec Command="git checkout $(EmscriptenVersion) && $(InstallCmd) && $(ActivateCmd)"
WorkingDirectory="$(EMSDK_PATH)"
IgnoreStandardErrorWarningFormat="true" />
</Target>
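  <!-- For reference, the provisioning above is roughly equivalent to doing the following by hand
       (illustrative; the exact version is read from wasm/emscripten-version.txt):
         git clone https://github.com/emscripten-core/emsdk.git
         ./emsdk install <version>
         ./emsdk activate <version>
       and then setting EMSDK_PATH to the resulting checkout. -->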
<!-- Copy Mono runtime bits to $(Destination) -->
<Target Name="CopyMonoRuntimeFilesFromArtifactsToDestination">
<ItemGroup>
<_MonoRuntimeArtifacts Include="$(RuntimeBinDir)\*.*" />
</ItemGroup>
<Error Condition="'$(Destination)' == ''" Text="Destination should not be empty" />
    <Error Condition="@(_MonoRuntimeArtifacts->Count()) &lt; 2" Text="Mono artifacts were not found at $(RuntimeBinDir)" />
    <Message Text="Copying Mono Runtime artifacts from '$(RuntimeBinDir)' to '$(Destination)'." Importance="High" />
<Copy SourceFiles="@(_MonoRuntimeArtifacts)"
DestinationFolder="$(Destination)"
OverwriteReadOnlyFiles="true"
SkipUnchangedFiles="true" />
</Target>
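  <!-- Illustrative invocation of the target above (assumes the project is driven directly through MSBuild;
       <target-dir> is a placeholder):
         dotnet build mono.proj -t:CopyMonoRuntimeFilesFromArtifactsToDestination -p:Destination=<target-dir> -->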
  <!-- Copy Mono runtime bits to the CoreCLR Core_Root in order to run runtime tests -->
<Target Name="PatchCoreClrCoreRoot">
<MSBuild Projects="$(MSBuildProjectFullPath)"
Properties="Destination=$(CoreClrTestCoreRoot)"
Targets="CopyMonoRuntimeFilesFromArtifactsToDestination" />
</Target>
  <!-- Copy CoreCLR runtime bits back to Core_Root -->
<Target Name="RestoreCoreClrCoreRoot">
<Copy SourceFiles="$(CoreCLRArtifactsPath)\System.Private.CoreLib.dll"
DestinationFiles="$(CoreClrTestCoreRoot)\System.Private.CoreLib.dll" />
<Copy SourceFiles="$(CoreCLRArtifactsPath)\$(CoreClrFileName)"
DestinationFiles="$(CoreClrTestCoreRoot)\$(CoreClrFileName)" />
</Target>
<!-- Run CoreCLR runtime test using testhost -->
<Target Name="RunCoreClrTest" DependsOnTargets="PatchCoreClrCoreRoot">
    <Error Condition="'$(CoreClrTest)' == ''" Text="'CoreClrTest' is not set. E.g. set it to `$(ArtifactsDir)tests/coreclr/$(TargetOS).$(Platform).$(CoreClrTestConfig)/JIT/opt/InstructionCombining/DivToMul/DivToMul$(ScriptExt)` in order to run the DivToMul test." />
<Exec Command="$(CoreClrTest) -coreroot="$(CoreClrTestCoreRoot)""/>
</Target>
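  <!-- Illustrative invocation (the test path is taken from the error message above; adjust OS/arch/configuration
       to match your build):
         dotnet build mono.proj -t:RunCoreClrTest -p:CoreClrTest=<ArtifactsDir>tests/coreclr/<TargetOS>.<Platform>.<CoreClrTestConfig>/JIT/opt/InstructionCombining/DivToMul/DivToMul<ScriptExt> -->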
  <!-- Run CoreCLR tests using the run.cmd / run.sh wrapper scripts -->
<Target Name="RunCoreClrTests" DependsOnTargets="PatchCoreClrCoreRoot">
<Exec Condition="'$(HostOS)' == 'windows'" Command="$(MonoProjectRoot)..\tests\run.cmd $(CoreClrTestConfig)" ContinueOnError="ErrorAndContinue" />
<Exec Condition="'$(HostOS)' != 'windows'" Command="$(MonoProjectRoot)../tests/run.sh $(CoreClrTestConfig)" ContinueOnError="ErrorAndContinue" />
</Target>
<!-- Mono runtime build -->
<Target Name="BuildMonoRuntime">
<ItemGroup>
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' == 'true'" Include="-G Ninja"/>
<_MonoCMakeArgs Include="-DCMAKE_INSTALL_PREFIX="$(MonoObjDir)out""/>
<_MonoCMakeArgs Include="-DCMAKE_INSTALL_LIBDIR=lib"/>
<_MonoCMakeArgs Include="-DCMAKE_BUILD_TYPE=$(Configuration)"/>
<_MonoCMakeArgs Condition="'$(CMakeArgs)' != ''" Include="$(CMakeArgs)"/>
<_MonoCMakeArgs Condition="'$(MonoEnableLLVM)' == 'true'" Include="-DLLVM_PREFIX=$(MonoLLVMDir.TrimEnd('\/'))" />
<_MonoCMakeArgs Condition="'$(BuildDarwinFrameworks)' == 'true'" Include="-DBUILD_DARWIN_FRAMEWORKS=1" />
<_MonoCMakeArgs Include="-DGC_SUSPEND=$(MonoThreadSuspend)" />
<_MonoCMakeArgs Include="-DMONO_LIB_NAME=$(MonoLibName)" />
<_MonoCMakeArgs Include="-DMONO_SHARED_LIB_NAME=$(MonoSharedLibName)" />
</ItemGroup>
<!-- We build LLVM bits for x64 Linux without C++11 ABI (CentOS 7 has libstdc++ < 5.1) -->
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoEnableLLVM)' == 'true' and '$(MonoLLVMUseCxx11Abi)' != 'true'">
<_MonoCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=0" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoAOTEnableLLVM)' == 'true' and '$(MonoAOTLLVMUseCxx11Abi)' != 'true'">
<_MonoAOTCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=0" />
</ItemGroup>
    <!-- We build LLVM bits for ARM64 Linux with C++11 ABI (Ubuntu 16.04 has libstdc++ > 5.1) -->
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoEnableLLVM)' == 'true' and '$(MonoLLVMUseCxx11Abi)' == 'true'">
<_MonoCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=1" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoAOTEnableLLVM)' == 'true' and '$(MonoAOTLLVMUseCxx11Abi)' == 'true'">
<_MonoAOTCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=1" />
</ItemGroup>
<!-- ARM Linux cross build options on CI -->
<ItemGroup Condition="'$(TargetsAndroid)' != 'true' and '$(MonoCrossDir)' != '' and ('$(TargetArchitecture)' == 'arm' or '$(TargetArchitecture)' == 'armv6' or '$(TargetArchitecture)' == 'arm64')">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoCMakeArgs Condition="'$(TargetOS)' == 'Linux' and ('$(TargetArchitecture)' == 'arm' or '$(TargetArchitecture)' == 'armv6')" Include="-DMONO_ARM_FPU=vfp-hard" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm64'" Include="TARGET_BUILD_ARCH=arm64" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm'" Include="TARGET_BUILD_ARCH=arm" />
<_MonoBuildEnv Condition="'$(Platform)' == 'armv6'" Include="TARGET_BUILD_ARCH=armv6" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm64'" Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/aarch64-linux-gnu/pkgconfig" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm'" Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/arm-linux-gnueabihf/pkgconfig" />
<_MonoBuildEnv Condition="'$(Platform)' == 'armv6'" Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/arm-linux-gnueabihf/pkgconfig" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-march=armv6zk" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mcpu=arm1176jzf-s" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfpu=vfp" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfloat-abi=hard" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-march=armv6zk" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mcpu=arm1176jzf-s" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfpu=vfp" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfloat-abi=hard" />
</ItemGroup>
<!-- x64 illumos cross build options -->
<ItemGroup Condition="'$(Targetsillumos)' == 'true' and '$(MonoCrossDir)' != ''">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoBuildEnv Include="TARGET_BUILD_ARCH=x64" />
<_MonoBuildEnv Include="PKG_CONFIG_PATH=$(MonoCrossDir)/lib/pkgconfig" />
</ItemGroup>
<!-- s390x Linux cross build options -->
<ItemGroup Condition="'$(MonoCrossDir)' != '' and '$(TargetArchitecture)' == 's390x'">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoBuildEnv Include="TARGET_BUILD_ARCH=s390x" />
<_MonoBuildEnv Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/s390x-linux-gnu/pkgconfig" />
</ItemGroup>
<!-- x64 FreeBSD cross build options -->
<ItemGroup Condition="'$(TargetsFreeBSD)' == 'true' and '$(MonoCrossDir)' != ''">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoBuildEnv Include="TARGET_BUILD_ARCH=x64" />
</ItemGroup>
<!-- Windows specific options -->
<ItemGroup Condition="'$(TargetsWindows)' == 'true'">
<_MonoCPPFLAGS Include="-DWIN32" />
<_MonoCPPFLAGS Include="-DWIN32_LEAN_AND_MEAN" />
<!--<_MonoCPPFLAGS Include="-D_WINDOWS" />--> <!-- set in monow.vcxproj, not sure we really need it -->
<_MonoCPPFLAGS Condition="'$(Platform)' == 'x64' or '$(Platform)' == 'arm64'" Include="-DWIN64" />
<_MonoCPPFLAGS Condition="'$(Configuration)' == 'Release'" Include="-DNDEBUG" />
<_MonoCPPFLAGS Condition="'$(Configuration)' == 'Debug'" Include="-D_DEBUG" />
<!-- <_MonoCPPFLAGS Include="-D__default_codegen__" /> --> <!-- doesn't seem to be used -->
<_MonoCPPFLAGS Include="-D_CRT_SECURE_NO_WARNINGS" />
<_MonoCPPFLAGS Include="-D_CRT_NONSTDC_NO_DEPRECATE" />
<!--<_MonoCPPFLAGS Include="-DGC_NOT_DLL" />--> <!-- only used for Boehm -->
<_MonoCPPFLAGS Include="-DWIN32_THREADS" />
<_MonoCPPFLAGS Include="-DWINVER=0x0601" />
<_MonoCPPFLAGS Include="-D_WIN32_WINNT=0x0601" />
<_MonoCPPFLAGS Include="-D_WIN32_IE=0x0501" />
<_MonoCPPFLAGS Include="-D_UNICODE" />
<_MonoCPPFLAGS Include="-DUNICODE" />
<_MonoCPPFLAGS Include="-DFD_SETSIZE=1024" />
<_MonoCPPFLAGS Include="-DNVALGRIND" />
<!-- Select generator platform for VS generator -->
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x64'" Include="-A x64" />
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x86'" Include="-A Win32" />
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm'" Include="-A ARM" />
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm64'" Include="-A ARM64" />
</ItemGroup>
<!-- OSX specific options -->
<ItemGroup Condition="'$(TargetsOSX)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_OSX_DEPLOYMENT_TARGET=$(macOSVersionMin)" />
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
<!-- Force running as arm64 even when invoked from an x86 msbuild process -->
<_MonoBuildEnv Condition="'$(BuildArchitecture)' == 'arm64'" Include="arch -arch arm64" />
</ItemGroup>
<!-- Mac Catalyst specific options -->
<ItemGroup Condition="'$(TargetsMacCatalyst)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_SYSTEM_VARIANT=MacCatalyst" />
<!-- https://gitlab.kitware.com/cmake/cmake/-/issues/20132 -->
<_MonoCPPFLAGS Include="-Wno-overriding-t-option" />
      <_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-target arm64-apple-ios14.2-macabi" />
      <_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'x64'" Include="-target x86_64-apple-ios13.5-macabi" />
      <_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
      <_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-target arm64-apple-ios14.2-macabi" />
      <_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'x64'" Include="-target x86_64-apple-ios13.5-macabi" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
<!-- Force running as arm64 even when invoked from an x86 msbuild process -->
<_MonoBuildEnv Condition="'$(BuildArchitecture)' == 'arm64'" Include="arch -arch arm64" />
</ItemGroup>
<!-- WASM specific options -->
<PropertyGroup Condition="'$(TargetsBrowser)' == 'true'">
<_MonoMinimal Condition="'$(Configuration)' == 'Release'">,debugger_agent,log_dest</_MonoMinimal>
<_MonoMinimal Condition="'$(Configuration)' == 'Release' and '$(MonoEnableAssertMessages)' != 'true'">$(_MonoMinimal),assert_messages</_MonoMinimal>
</PropertyGroup>
<ItemGroup Condition="'$(TargetsBrowser)' == 'true'">
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=jit,sgen_major_marksweep_conc,sgen_split_nursery,sgen_gc_bridge,sgen_toggleref,sgen_debug_helpers,sgen_binary_protocol,logging,shared_perfcounters,interpreter,threads,qcalls$(_MonoMinimal)"/>
<_MonoCMakeArgs Include="-DENABLE_INTERP_LIB=1"/>
<_MonoCMakeArgs Include="-DDISABLE_ICALL_TABLES=1"/>
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCMakeArgs Include="-DENABLE_LAZY_GC_THREAD_CREATION=1"/>
<_MonoCMakeArgs Include="-DENABLE_LLVM_RUNTIME=1"/>
<_MonoCFLAGS Include="-fexceptions"/>
<_MonoCXXFLAGS Include="-fexceptions"/>
<_MonoCFLAGS Include="$(EscapedQuoteW)-I$([MSBuild]::NormalizePath('$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)', 'runtimes', 'browser-wasm', 'native', 'include'))$(EscapedQuoteW)"/>
</ItemGroup>
<!-- iOS/tvOS specific options -->
<PropertyGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'">
<_MonoCCOption>CC="$(XcodeDir)/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang"</_MonoCCOption>
<_MonoCXXOption>CXX="$(XcodeDir)/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang++"</_MonoCXXOption>
<_MonoRunInitCompiler>false</_MonoRunInitCompiler>
<_MonoCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true'">$(XcodeDir)/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$(iOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true'">$(XcodeDir)/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator$(iOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true'">$(XcodeDir)/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS$(tvOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true'">$(XcodeDir)/Platforms/AppleTVSimulator.platform/Developer/SDKs/AppleTVSimulator$(tvOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSystemName Condition="'$(TargetsiOS)' == 'true'">iOS</_MonoCMakeSystemName>
<_MonoCMakeSystemName Condition="'$(TargetstvOS)' == 'true'">tvOS</_MonoCMakeSystemName>
<_MonoCMakeVersionMin Condition="'$(TargetsiOS)' == 'true'">$(iOSVersionMin)</_MonoCMakeVersionMin>
<_MonoCMakeVersionMin Condition="'$(TargetstvOS)' == 'true'">$(tvOSVersionMin)</_MonoCMakeVersionMin>
</PropertyGroup>
<PropertyGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">
<_IcuLibdir>$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)/runtimes/$(TargetOS)-$(TargetArchitecture)/native/lib</_IcuLibdir>
</PropertyGroup>
<ItemGroup Condition="('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true') and '$(Platform)' == 'arm64'">
<_MonoCMakeArgs Include="-DCMAKE_OSX_ARCHITECTURES=arm64"/>
</ItemGroup>
<ItemGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_SYSTEM_NAME=$(_MonoCMakeSystemName)"/>
<_MonoCMakeArgs Include="-DCMAKE_OSX_DEPLOYMENT_TARGET=$(_MonoCMakeVersionMin)" />
<_MonoCMakeArgs Include="-DCMAKE_OSX_SYSROOT='$(_MonoCMakeSysroot)'" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'x64'" Include="-DCMAKE_OSX_ARCHITECTURES=x86_64"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'x86'" Include="-DCMAKE_OSX_ARCHITECTURES=i386"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm64'" Include="-DCMAKE_OSX_ARCHITECTURES=arm64"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm'" Include=""-DCMAKE_OSX_ARCHITECTURES=armv7%3Barmv7s""/>
<_MonoCFLAGS Include="-Wl,-application_extension" />
<_MonoCXXFLAGS Include="-Wl,-application_extension" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">
<_MonoCMakeArgs Include="-DICU_LIBDIR=$(_IcuLibdir)"/>
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCFLAGS Include="-I$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)/runtimes/$(TargetOS)-$(TargetArchitecture)/native/include" />
</ItemGroup>
<!-- iOS/tvOS simulator specific options -->
<ItemGroup Condition="('$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true') or ('$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true')">
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=shared_perfcounters"/>
</ItemGroup>
<!-- iOS/tvOS device specific options -->
<ItemGroup Condition="('$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true') or ('$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true')">
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=jit,logging,shared_perfcounters" />
<_MonoCMakeArgs Include="-DENABLE_VISIBILITY_HIDDEN=1"/>
<_MonoCMakeArgs Include="-DENABLE_LAZY_GC_THREAD_CREATION=1"/>
<_MonoCMakeArgs Include="-DENABLE_SIGALTSTACK=0"/>
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCFLAGS Include="-Werror=partial-availability" />
<_MonoCFLAGS Condition="'$(TargetstvOS)' == 'true'" Include="-fno-gnu-inline-asm" />
<_MonoCFLAGS Include="-fexceptions" />
<_MonoCPPFLAGS Include="-DSMALL_CONFIG" />
<_MonoCPPFLAGS Include="-D_XOPEN_SOURCE" />
<_MonoCPPFLAGS Include="-DHAVE_LARGE_FILE_SUPPORT=1" />
<_MonoCXXFLAGS Include="-Werror=partial-availability" />
<_MonoCXXFLAGS Condition="'$(TargetstvOS)' == 'true'" Include="-fno-gnu-inline-asm" />
<_MonoCXXFLAGS Include="-fexceptions" />
</ItemGroup>
<!-- Android specific options -->
<PropertyGroup Condition="'$(TargetsAndroid)' == 'true'">
<_MonoRunInitCompiler>false</_MonoRunInitCompiler>
</PropertyGroup>
<ItemGroup Condition="'$(TargetsAndroid)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(ANDROID_NDK_ROOT)/build/cmake/android.toolchain.cmake"/>
<_MonoCMakeArgs Include="-DANDROID_NDK=$(ANDROID_NDK_ROOT)"/>
<_MonoCMakeArgs Include="-DANDROID_STL=none"/>
<_MonoCMakeArgs Include="-DANDROID_CPP_FEATURES="no-rtti no-exceptions""/>
<_MonoCMakeArgs Include="-DANDROID_NATIVE_API_LEVEL=$(AndroidApiVersion)"/>
<_MonoCMakeArgs Include="-DANDROID_PLATFORM=android-$(AndroidApiVersion)"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm64'" Include="-DANDROID_ABI=arm64-v8a" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm'" Include="-DANDROID_ABI=armeabi-v7a" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'x86'" Include="-DANDROID_ABI=x86" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'x64'" Include="-DANDROID_ABI=x86_64" />
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=ssa,logging" />
<_MonoCMakeArgs Include="-DENABLE_SIGALTSTACK=1"/>
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-march=armv7-a" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-mtune=cortex-a8" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfpu=vfp" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfloat-abi=softfp" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-fpic" />
<_MonoCFLAGS Include="-fstack-protector" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64'" Include="-DANDROID64" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'x64'" Include="-DL_cuserid=9" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-D__POSIX_VISIBLE=201002" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DSK_RELEASE" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DNDEBUG" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-UDEBUG" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-march=armv7-a" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-mtune=cortex-a8" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfpu=vfp" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfloat-abi=softfp" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-fpic" />
<_MonoCXXFLAGS Include="-fstack-protector" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64'" Include="-DANDROID64" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'x64'" Include="-DL_cuserid=9" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-D__POSIX_VISIBLE=201002" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DSK_RELEASE" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DNDEBUG" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-UDEBUG" />
</ItemGroup>
<!-- Linux options -->
    <ItemGroup Condition="'$(TargetsLinux)' == 'true'">
<_MonoCFLAGS Include="-Wl,--build-id=sha1" />
<_MonoCXXFLAGS Include="-Wl,--build-id=sha1" />
</ItemGroup>
<ItemGroup Condition="'$(RealTargetOS)' == 'Linux'">
<_MonoAOTCFLAGS Include="-Wl,--build-id=sha1" />
<_MonoAOTCXXFLAGS Include="-Wl,--build-id=sha1" />
</ItemGroup>
    <!-- Dev-loop features -->
<ItemGroup Condition="'$(MonoMsCorDbi)' == 'true'">
<_MonoCMakeArgs Include="-DENABLE_MSCORDBI=1" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'">
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_PAL_TCP=1"/>
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_DISABLE_DEFAULT_LISTEN_PORT=1"/>
<_MonoCMakeArgs Include="-DDISABLE_LINK_STATIC_COMPONENTS=1" Condition="!('$(TargetsiOSSimulator)' == 'true' or '$(TargetstvOSSimulator)' == 'true')"/>
</ItemGroup>
<ItemGroup Condition="'$(TargetsAndroid)' == 'true'">
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_PAL_TCP=1"/>
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_DISABLE_DEFAULT_LISTEN_PORT=1"/>
</ItemGroup>
<!-- Components -->
<ItemGroup Condition="'$(MonoComponentsStatic)' == 'true'">
<_MonoCMakeArgs Include="-DSTATIC_COMPONENTS=1" />
</ItemGroup>
<ItemGroup>
<_MonoCMakeArgs Include="-DMONO_COMPONENTS_RID=$(TargetOS)-$(TargetArchitecture)" />
</ItemGroup>
<PropertyGroup>
<_MonoCFLAGSOption>-DCMAKE_C_FLAGS="@(_MonoCPPFLAGS, ' ') @(_MonoCFLAGS, ' ')"</_MonoCFLAGSOption>
<_MonoCXXFLAGSOption>-DCMAKE_CXX_FLAGS="@(_MonoCPPFLAGS, ' ') @(_MonoCXXFLAGS, ' ')"</_MonoCXXFLAGSOption>
</PropertyGroup>
<ItemGroup>
<_MonoCMakeArgs Include="$(_MonoCFLAGSOption)"/>
<_MonoCMakeArgs Include="$(_MonoCXXFLAGSOption)"/>
</ItemGroup>
<PropertyGroup>
<EMSDK_PATH>$([MSBuild]::EnsureTrailingSlash('$(EMSDK_PATH)'))</EMSDK_PATH>
<_MonoCMakeConfigureCommand>cmake @(_MonoCMakeArgs, ' ') $(MonoCMakeExtraArgs) "$(MonoProjectRoot.TrimEnd('\/'))"</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' != 'true' and '$(_MonoRunInitCompiler)' != 'false' and '$(HostOS)' != 'windows'">bash -c 'source $(RepositoryEngineeringCommonDir)native/init-compiler.sh "$(RepositoryEngineeringCommonDir)native" "$(_CompilerTargetArch)" "$(MonoCCompiler)" && @(_MonoBuildEnv, ' ') $(_MonoCMakeConfigureCommand)'</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' != 'true' and '$(_MonoRunInitCompiler)' != 'false' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjDir)" && @(_MonoBuildEnv, ' ') $(_MonoCMakeConfigureCommand)</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' != 'true' and '$(_MonoRunInitCompiler)' == 'false'">$(_MonoCCOption) $(_MonoCXXOption) @(_MonoBuildEnv, ' ') $(_MonoCMakeConfigureCommand)</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' == 'true' and '$(HostOS)' != 'windows'">bash -c 'source $(EMSDK_PATH)/emsdk_env.sh 2>&1 && emcmake $(_MonoCMakeConfigureCommand)'</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' == 'true' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && emcmake $(_MonoCMakeConfigureCommand)</_MonoCMakeConfigureCommand>
<_MonoCMakeBuildCommand>cmake --build . --target install --config $(Configuration)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(MonoVerboseBuild)' == 'true'">$(_MonoCMakeBuildCommand) --verbose</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(_MonoUseNinja)' != 'true'">$(_MonoCMakeBuildCommand) --parallel $([System.Environment]::ProcessorCount)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(TargetsBrowser)' != 'true' and '$(HostOS)' != 'windows'">@(_MonoBuildEnv, ' ') $(_MonoCMakeBuildCommand)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(TargetsBrowser)' != 'true' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjDir)" && @(_MonoBuildEnv, ' ') $(_MonoCMakeBuildCommand)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(TargetsBrowser)' == 'true' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && $(_MonoCMakeBuildCommand)</_MonoCMakeBuildCommand>
</PropertyGroup>
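    <!-- On a plain Linux desktop build the assembled configure command ends up looking roughly like
         (illustrative, values elided):
           cmake -G Ninja -DCMAKE_INSTALL_PREFIX="<MonoObjDir>/out" -DCMAKE_INSTALL_LIBDIR=lib
                 -DCMAKE_BUILD_TYPE=<Configuration> -DGC_SUSPEND=<MonoThreadSuspend> ... "<MonoProjectRoot>"
         The exact command that runs is echoed by the Message task below. -->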
<MakeDir Directories="$(MonoObjDir)" />
<!-- configure -->
<PropertyGroup>
<_MonoCMakeCmdLineUpToDate Condition="Exists('$(MonoObjDir)cmake_cmd_line.txt') and '$([System.IO.File]::ReadAllText($(MonoObjDir)cmake_cmd_line.txt).Trim())' == '$(_MonoCMakeConfigureCommand.Trim())'">true</_MonoCMakeCmdLineUpToDate>
<_MonoSkipCMakeConfigure>false</_MonoSkipCMakeConfigure>
<_MonoSkipCMakeConfigure Condition="'$(SkipMonoCrossJitConfigure)' == 'true' or '$(_MonoCMakeCmdLineUpToDate)' == 'true'">true</_MonoSkipCMakeConfigure>
</PropertyGroup>
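    <!-- The last configure command line is cached in cmake_cmd_line.txt; deleting that file (or changing the
         command line) forces the CMake configure step to run again. -->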
<Message Condition="'$(_MonoSkipCMakeConfigure)' == 'true'" Text="The CMake command line is the same as the last run. Skipping running CMake configure." Importance="High"/>
<Message Condition="'$(_MonoSkipCMakeConfigure)' != 'true'" Text="Running '$(_MonoCMakeConfigureCommand)' in '$(MonoObjDir)'" Importance="High"/>
<Exec Condition="'$(_MonoSkipCMakeConfigure)' != 'true'" Command="$(_MonoCMakeConfigureCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<WriteLinesToFile
Condition="'$(_MonoSkipCMakeConfigure)' != 'true'"
File="$(MonoObjDir)cmake_cmd_line.txt"
Lines="$(_MonoCMakeConfigureCommand)"
Overwrite="true" />
<!-- build -->
<Message Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Text="Running '$(_MonoCMakeBuildCommand)' in '$(MonoObjDir)'" Importance="High"/>
<Exec Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Command="$(_MonoCMakeBuildCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<!-- strip -->
<PropertyGroup>
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('Linux'))">linux-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('OSX'))">darwin-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="'$(HostOS)' == 'windows'">windows-x86_64</MonoToolchainPrebuiltOS>
<_MonoRuntimeFilePath>$(MonoObjDir)out\lib\$(MonoFileName)</_MonoRuntimeFilePath>
<_LinuxAbi Condition="'$(TargetsAndroid)' != 'true'">gnu</_LinuxAbi>
<_LinuxAbi Condition="'$(TargetsAndroid)' == 'true'">android</_LinuxAbi>
<_LinuxFloatAbi Condition="'$(TargetsAndroid)' != 'true'">hf</_LinuxFloatAbi>
<_Objcopy>objcopy</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'arm'">arm-linux-$(_LinuxAbi)eabi$(_LinuxFloatAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'armv6'">arm-linux-$(_LinuxAbi)eabi$(_LinuxFloatAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'arm64'">aarch64-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 's390x'">s390x-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'x64'">x86_64-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'x86'">i686-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(TargetsAndroid)' == 'true'">$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/bin/llvm-objcopy</_Objcopy>
</PropertyGroup>
<!-- test viability of objcopy command -->
<Exec Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) -V" IgnoreStandardErrorWarningFormat="true" ContinueOnError="WarnAndContinue" IgnoreExitCode="true" EchoOff="true" ConsoleToMsBuild="true">
<Output TaskParameter="ExitCode" PropertyName="_ObjcopyFound"/>
</Exec>
<PropertyGroup>
      <!-- if no prefixed objcopy was found, fall back to the unprefixed objcopy from $PATH (used for x64 on CentOS) -->
<_Objcopy Condition="'$(_ObjcopyFound)' != '0'">objcopy</_Objcopy>
</PropertyGroup>
<ItemGroup>
<FilesToStrip Include="$(_MonoRuntimeFilePath)" />
<FilesToStrip Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix)" />
<FilesToStrip Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\Mono*framework\**\Mono*" Exclude="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\Mono*framework\**\*.dwarf" />
</ItemGroup>
<Message Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ($([MSBuild]::IsOSPlatform('OSX')) or $([MSBuild]::IsOSPlatform('Linux')))" Text="Stripping debug symbols from %(FilesToStrip.Identity)" Importance="High"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true')" Command="dsymutil --flat --minimize %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true') and '$(Configuration)' == 'Release'" Command="strip -no_code_signature_warning -S %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) --only-keep-debug %(FilesToStrip.Identity) %(FilesToStrip.Identity).dbg" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) --strip-unneeded %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) --add-gnu-debuglink=%(FilesToStrip.Identity).dbg %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
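    <!-- The three objcopy invocations above implement the usual split-debug pattern: extract the debug info
         into a side-by-side .dbg file, strip unneeded symbols from the runtime binary, then add a
         gnu-debuglink entry so debuggers can locate the detached symbols. -->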
</Target>
<!-- Build AOT cross compiler (if available) -->
<Target Name="BuildMonoCross" Condition="'$(BuildMonoAOTCrossCompiler)' == 'true'" DependsOnTargets="BuildMonoRuntime">
<!-- iOS/tvOS specific options -->
<PropertyGroup Condition="'$(TargetstvOS)' == 'true' or '$(TargetsiOS)' == 'true'">
<!-- FIXME: Disable for simulator -->
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true'">$(XcodeDir)/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$(iOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true'">$(XcodeDir)/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator$(iOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true'">$(XcodeDir)/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS$(tvOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true'">$(XcodeDir)/Platforms/AppleTVSimulator.platform/Developer/SDKs/AppleTVSimulator$(tvOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-$(Platform)-darwin.h</MonoAotOffsetsFile>
<MonoAotAbi Condition="'$(Platform)' == 'arm64'">aarch64-apple-darwin10</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'arm'">arm-apple-darwin10</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x86'">i386-apple-darwin10</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x64'">x86_64-apple-darwin10</MonoAotAbi>
</PropertyGroup>
<!-- Catalyst specific options -->
<PropertyGroup Condition="'$(TargetsMacCatalyst)' == 'true'">
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotCMakeSysroot Condition="'$(TargetsMacCatalyst)' == 'true'">$(XcodeDir)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk</MonoAotCMakeSysroot>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-$(Platform)-darwin.h</MonoAotOffsetsFile>
<MonoAotAbi Condition="'$(Platform)' == 'arm64'">aarch64-apple-maccatalyst</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x64'">x86_64-apple-maccatalyst</MonoAotAbi>
</PropertyGroup>
<!-- Linux specific options -->
<ItemGroup Condition="'$(RealTargetOS)' == 'Linux' or $([MSBuild]::IsOSPlatform('Linux'))">
<_LibClang Include="$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/lib64/libclang.so.*"/>
</ItemGroup>
<PropertyGroup Condition="'$(TargetsLinux)' == 'true' and '$(Platform)' == 'arm64'">
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotAbi>aarch64-linux-gnu</MonoAotAbi>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-aarch-linux-gnu.h</MonoAotOffsetsFile>
<MonoAotOffsetsPrefix>$(MonoCrossDir)/usr/lib/gcc/aarch64-linux-gnu/5</MonoAotOffsetsPrefix>
</PropertyGroup>
<!-- macOS host specific options -->
<ItemGroup Condition="'$(RealTargetOS)' == 'OSX' or $([MSBuild]::IsOSPlatform('OSX'))">
<MonoAOTCMakeArgs Include="-DCMAKE_OSX_DEPLOYMENT_TARGET=$(macOSVersionMin)" />
</ItemGroup>
<!-- WASM specific options -->
<PropertyGroup Condition="'$(TargetsBrowser)' == 'true'">
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotAbi>wasm32-unknown-none</MonoAotAbi>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-wasm32-unknown-none.h</MonoAotOffsetsFile>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('OSX'))">$(EMSDK_PATH)/upstream/lib/libclang.dylib</MonoLibClang>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('Linux'))">$(EMSDK_PATH)/upstream/lib/libclang.so</MonoLibClang>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('Windows'))">$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'upstream', 'bin', 'libclang.dll'))</MonoLibClang>
<PythonCmd Condition="'$(HostOS)' == 'windows'">setlocal EnableDelayedExpansion && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && !EMSDK_PYTHON!</PythonCmd>
<_ForceRelease Condition="$([MSBuild]::IsOSPlatform('Windows')) and '$(TargetArchitecture)' == 'wasm' and '$(Configuration)' == 'Debug'">true</_ForceRelease>
</PropertyGroup>
<!-- Windows specific options -->
<ItemGroup Condition="'$(RealTargetOS)' == 'Windows' or $([MSBuild]::IsOSPlatform('Windows'))">
<_MonoAOTCPPFLAGS Include="-DHOST_WIN32" />
<_MonoAOTCPPFLAGS Include="-D__WIN32__" />
<_MonoAOTCPPFLAGS Include="-DWIN32" />
<_MonoAOTCPPFLAGS Include="-DWIN32_LEAN_AND_MEAN" />
<!--<_MonoAOTCPPFLAGS Include="-D_WINDOWS" />--> <!-- set in monow.vcxproj, not sure we really need it -->
<_MonoAOTCPPFLAGS Condition="'$(Platform)' == 'x64' or '$(Platform)' == 'arm64'" Include="-DWIN64" />
<_MonoAOTCPPFLAGS Condition="'$(Configuration)' == 'Release' or '$(_ForceRelease)' == 'true'" Include="-DNDEBUG" />
<_MonoAOTCPPFLAGS Condition="'$(Configuration)' == 'Debug' and '$(_ForceRelease)' != 'true'" Include="-D_DEBUG" />
<!-- <_MonoAOTCPPFLAGS Include="-D__default_codegen__" /> --> <!-- doesn't seem to be used -->
<_MonoAOTCPPFLAGS Include="-D_CRT_SECURE_NO_WARNINGS" />
<_MonoAOTCPPFLAGS Include="-D_CRT_NONSTDC_NO_DEPRECATE" />
<!--<_MonoAOTCPPFLAGS Include="-DGC_NOT_DLL" />--> <!-- only used for Boehm -->
<_MonoAOTCPPFLAGS Include="-DWIN32_THREADS" />
<_MonoAOTCPPFLAGS Include="-DWINVER=0x0601" />
<_MonoAOTCPPFLAGS Include="-D_WIN32_WINNT=0x0601" />
<_MonoAOTCPPFLAGS Include="-D_WIN32_IE=0x0501" />
<_MonoAOTCPPFLAGS Include="-D_UNICODE" />
<_MonoAOTCPPFLAGS Include="-DUNICODE" />
<_MonoAOTCPPFLAGS Include="-DFD_SETSIZE=1024" />
<_MonoAOTCPPFLAGS Include="-DNVALGRIND" />
<MonoAOTCMakeArgs Include="-DDISABLE_INTERPRETER=1" />
<!-- Select generator platform for VS generator -->
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x64'" Include="-A x64" />
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x86'" Include="-A Win32" />
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm'" Include="-A ARM" />
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm64'" Include="-A ARM64" />
</ItemGroup>
<!-- Android specific options -->
<PropertyGroup Condition="'$(TargetsAndroid)' == 'true'">
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('Linux'))">linux-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('OSX'))">darwin-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="'$(HostOS)' == 'windows'">windows-x86_64</MonoToolchainPrebuiltOS>
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotCMakeSysroot Condition="Exists('$(ANDROID_NDK_ROOT)/sysroot')">$(ANDROID_NDK_ROOT)/sysroot</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(MonoAotCMakeSysroot)' == '' And Exists('$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/sysroot')">$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/sysroot</MonoAotCMakeSysroot>
<MonoAotAbi Condition="'$(Platform)' == 'arm64'">aarch64-v8a-linux-android</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'arm'">armv7-none-linux-androideabi</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x86'">i686-none-linux-android</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x64'">x86_64-none-linux-android</MonoAotAbi>
<MonoAotOffsetsFile>$(MonoObjDir)cross/offsets-$(Platform)-android.h</MonoAotOffsetsFile>
</PropertyGroup>
<PropertyGroup>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('OSX')) and '$(MonoLibClang)' == ''">$(XcodeDir)/Toolchains/XcodeDefault.xctoolchain/usr/lib/libclang.dylib</MonoLibClang>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('Linux')) and '$(MonoLibClang)' == ''">@(_LibClang)</MonoLibClang>
<MonoLibClang Condition="'$(HostOS)' == 'windows' and '$(MonoLibClang)' == ''">c:/dev/LLVM/bin/libclang.dll</MonoLibClang>
<MonoAotCMakeSysroot Condition="'$(MonoAotCMakeSysroot)' == ''">$(MonoCrossDir)</MonoAotCMakeSysroot>
</PropertyGroup>
<ItemGroup Condition="'$(MonoUseCrossTool)' == 'true'">
<MonoAotCrossOffsetsToolParams Include="--abi=$(MonoAotAbi)" />
<MonoAotCrossOffsetsToolParams Include="--netcore" />
<MonoAotCrossOffsetsToolParams Include="--targetdir="$(MonoObjDir.TrimEnd('\/'))"" />
<MonoAotCrossOffsetsToolParams Include="--monodir="$(MonoProjectRoot.TrimEnd('\/'))"" />
<MonoAotCrossOffsetsToolParams Include="--nativedir="$(SharedNativeRoot.TrimEnd('\/'))"" />
<MonoAotCrossOffsetsToolParams Include="--outfile="$(MonoAotOffsetsFile)"" />
<MonoAotCrossOffsetsToolParams Include="--libclang="$(MonoLibClang)"" />
<MonoAotCrossOffsetsToolParams Condition="'$(MonoAotOffsetsPrefix)' != ''" Include="--prefix="$(MonoAotOffsetsPrefix)"" />
<MonoAotCrossOffsetsToolParams Condition="'$(MonoAotCMakeSysroot)' != ''" Include="--sysroot="$(MonoAotCMakeSysroot)"" />
<MonoAotCrossOffsetsToolParams Condition="'$(TargetsBrowser)' == 'true'" Include="--emscripten-sdk="$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'upstream', 'emscripten'))"" />
</ItemGroup>
<PropertyGroup>
<_MonoAOTCFLAGSOption>-DCMAKE_C_FLAGS="@(_MonoAOTCPPFLAGS, ' ') @(_MonoAOTCFLAGS, ' ')"</_MonoAOTCFLAGSOption>
<_MonoAOTCXXFLAGSOption>-DCMAKE_CXX_FLAGS="@(_MonoAOTCPPFLAGS, ' ') @(_MonoAOTCXXFLAGS, ' ')"</_MonoAOTCXXFLAGSOption>
</PropertyGroup>
<ItemGroup>
<MonoAOTCMakeArgs Include="-DAOT_TARGET_TRIPLE=$(MonoAotAbi)"/>
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' == 'true'" Include="-G Ninja"/>
<MonoAOTCMakeArgs Include="-DCMAKE_INSTALL_PREFIX=$([MSBuild]::NormalizePath('$(MonoObjCrossDir)', 'out'))"/>
<MonoAOTCMakeArgs Condition="'$(_ForceRelease)' != 'true'" Include="-DCMAKE_BUILD_TYPE=$(Configuration)"/>
<MonoAOTCMakeArgs Condition="'$(_ForceRelease)' == 'true'" Include="-DCMAKE_BUILD_TYPE=Release"/>
<!-- FIXME: Disable more -->
<MonoAOTCMakeArgs Include="-DENABLE_MINIMAL=" />
<MonoAOTCMakeArgs Include="-DENABLE_ICALL_SYMBOL_MAP=1" />
<MonoAOTCMakeArgs Include="-DDISABLE_SHARED_LIBS=1" />
<MonoAOTCMakeArgs Include="-DDISABLE_LIBS=1" />
<MonoAOTCMakeArgs Include="-DDISABLE_COMPONENTS=1" />
<MonoAOTCMakeArgs Condition="'$(MonoAotOffsetsFile)' != ''" Include="-DAOT_OFFSETS_FILE="$(MonoAotOffsetsFile)"" />
<MonoAOTCMakeArgs Condition="'$(MonoAOTEnableLLVM)' == 'true'" Include="-DLLVM_PREFIX=$(MonoAOTLLVMDir.TrimEnd('\/'))" />
<MonoAOTCMakeArgs Include="$(_MonoAOTCFLAGSOption)" />
<MonoAOTCMakeArgs Include="$(_MonoAOTCXXFLAGSOption)" />
<!-- thread suspend -->
<MonoAOTCMakeArgs Include="-DGC_SUSPEND=$(MonoThreadSuspend)" />
<!-- rename exe -->
<MonoAOTCMakeArgs Include="-DMONO_CROSS_COMPILE_EXECUTABLE_NAME=1" />
</ItemGroup>
<PropertyGroup>
<_MonoAotCrossOffsetsCommand Condition="'$(MonoUseCrossTool)' == 'true'">$(PythonCmd) $(MonoProjectRoot)mono/tools/offsets-tool/offsets-tool.py @(MonoAotCrossOffsetsToolParams, ' ')</_MonoAotCrossOffsetsCommand>
<_MonoAotCMakeConfigureCommand>cmake @(MonoAOTCMakeArgs, ' ') $(MonoProjectRoot)</_MonoAotCMakeConfigureCommand>
<_MonoAotCMakeConfigureCommand Condition="'$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjCrossDir)" && @(_MonoBuildEnv, ' ') $(_MonoAotCMakeConfigureCommand)</_MonoAotCMakeConfigureCommand>
<_MonoAotCMakeBuildCommand>cmake --build . --target install --config $(Configuration)</_MonoAotCMakeBuildCommand>
<_MonoAotCMakeBuildCommand Condition="'$(MonoVerboseBuild)' == 'true'">$(_MonoAotCMakeBuildCommand) --verbose</_MonoAotCMakeBuildCommand>
<_MonoAotCMakeBuildCommand Condition="'$(_MonoUseNinja)' != 'true'">$(_MonoAotCMakeBuildCommand) --parallel $([System.Environment]::ProcessorCount)</_MonoAotCMakeBuildCommand>
<_MonoAotCMakeBuildCommand Condition="'$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjCrossDir)" && @(_MonoBuildEnv, ' ') $(_MonoAotCMakeBuildCommand)</_MonoAotCMakeBuildCommand>
<_MonoAotPrebuiltOffsetsFile>$(ArtifactsObjDir)\mono\offsetfiles\$(PlatformConfigPathPart)\cross\$([System.IO.Path]::GetFileName('$(MonoAotOffsetsFile)'))</_MonoAotPrebuiltOffsetsFile>
</PropertyGroup>
<MakeDir Directories="$(MonoObjCrossDir)" />
<!-- offsets tool -->
<Message Condition="Exists('$(_MonoAotPrebuiltOffsetsFile)')" Text="Out-of-tree offset file found, moving into place" Importance="High" />
<Copy Condition="Exists('$(_MonoAotPrebuiltOffsetsFile)')" SourceFiles="$(_MonoAotPrebuiltOffsetsFile)" DestinationFolder="$([System.IO.Path]::GetDirectoryName('$(MonoAotOffsetsFile)'))" />
<Message Condition="'$(MonoUseCrossTool)' == 'true' and !Exists('$(MonoAotOffsetsFile)')" Text="Running '$(_MonoAotCrossOffsetsCommand)'" Importance="High" />
<Exec Condition="'$(MonoUseCrossTool)' == 'true' and !Exists('$(MonoAotOffsetsFile)')" Command="$(_MonoAotCrossOffsetsCommand)" IgnoreStandardErrorWarningFormat="true" />
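    <!-- The command above runs mono/tools/offsets-tool/offsets-tool.py (via $(PythonCmd)) with the abi,
         targetdir, monodir, outfile and libclang parameters assembled in MonoAotCrossOffsetsToolParams;
         it is skipped whenever $(MonoAotOffsetsFile) already exists (e.g. the prebuilt copy moved into
         place just above). -->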
<!-- configure -->
<PropertyGroup>
<_MonoAotCMakeCmdLineUpToDate Condition="Exists('$(MonoObjCrossDir)cmake_cmd_line.txt') and '$([System.IO.File]::ReadAllText($(MonoObjCrossDir)cmake_cmd_line.txt).Trim())' == '$(_MonoAotCMakeConfigureCommand.Trim())'">true</_MonoAotCMakeCmdLineUpToDate>
<_MonoSkipAotCMakeConfigure>false</_MonoSkipAotCMakeConfigure>
<_MonoSkipAotCMakeConfigure Condition="'$(MonoGenerateOffsetsOSGroups)' != '' or '$(_MonoAotCMakeCmdLineUpToDate)' == 'true'">true</_MonoSkipAotCMakeConfigure>
</PropertyGroup>
<Message Condition="'$(_MonoSkipAotCMakeConfigure)' == 'true'" Text="The AOT Cross CMake command line is the same as the last run. Skipping running CMake configure." Importance="High"/>
<Message Condition="'$(_MonoSkipAotCMakeConfigure)' != 'true'" Text="Running '$(_MonoAotCMakeConfigureCommand)' in '$(MonoObjCrossDir)'" Importance="High"/>
<Exec Condition="'$(_MonoSkipAotCMakeConfigure)' != 'true'" Command="$(_MonoAotCMakeConfigureCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjCrossDir)"/>
<WriteLinesToFile
Condition="'$(_MonoSkipAotCMakeConfigure)' != 'true'"
File="$(MonoObjCrossDir)cmake_cmd_line.txt"
Lines="$(_MonoAotCMakeConfigureCommand)"
Overwrite="true" />
<!-- build -->
<Message Text="Running '$(_MonoAotCMakeBuildCommand)' in '$(MonoObjCrossDir)'" Importance="High" />
<Exec Condition="'$(MonoGenerateOffsetsOSGroups)' == ''" Command="$(_MonoAotCMakeBuildCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjCrossDir)"/>
</Target>
<PropertyGroup>
<!-- Hardcode version paths in a global location. Condition on running OS to generate the right files for the Mono WASM cross tools. -->
<NativeVersionFile Condition="'$(HostOS)' == 'windows'">$(ArtifactsObjDir)_version.h</NativeVersionFile>
<NativeVersionFile Condition="'$(HostOS)' != 'windows'">$(ArtifactsObjDir)_version.c</NativeVersionFile>
<AssemblyName>.NET Runtime</AssemblyName>
</PropertyGroup>
<Import Project="$(RepositoryEngineeringDir)versioning.targets" />
<!-- The standard set of targets that need to run before the BuildMono target runs -->
<PropertyGroup>
<MonoDependsOnTargets>CheckEnv;GetXcodeDir;GenerateRuntimeVersionFile;BuildMonoRuntime;BuildMonoCross</MonoDependsOnTargets>
<MonoDependsOnTargets Condition="'$(TargetsBrowser)' == 'true'">GenerateRuntimeVersionFile;ProvisionEmscripten;$(MonoDependsOnTargets)</MonoDependsOnTargets>
</PropertyGroup>
<!-- General targets -->
<Target Name="BuildMono" AfterTargets="Build" DependsOnTargets="$(MonoDependsOnTargets)">
<PropertyGroup Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'">
<_MonoRuntimeFilePath Condition="'$(TargetsWindows)' == 'true'">$(MonoObjDir)out\bin\$(MonoFileName)</_MonoRuntimeFilePath>
<_MonoRuntimeFilePath Condition="'$(_MonoRuntimeFilePath)' == ''">$(MonoObjDir)out\lib\$(MonoFileName)</_MonoRuntimeFilePath>
<_MonoRuntimeStaticFilePath Condition="'$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsAndroid)' == 'true'">$(MonoObjDir)out\lib\$(MonoStaticLibFileName)</_MonoRuntimeStaticFilePath>
<_MonoIncludeInterpStaticFiles Condition="'$(TargetsBrowser)' == 'true'">true</_MonoIncludeInterpStaticFiles>
<_MonoIncludeIcuFiles Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">true</_MonoIncludeIcuFiles>
</PropertyGroup>
<PropertyGroup Condition="'$(BuildMonoAOTCrossCompiler)' == 'true'">
<_MonoAotCrossFilePath>$(MonoObjCrossDir)out\bin\$(MonoAotCrossFileName)</_MonoAotCrossFilePath>
<_MonoAotCrossPdbFilePath>$(MonoObjCrossDir)out\bin\$(MonoAotCrossPdbFileName)</_MonoAotCrossPdbFilePath>
</PropertyGroup>
<!-- Copy Mono runtime files to artifacts directory -->
<ItemGroup>
<_MonoRuntimeComponentsStaticFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(StaticLibSuffix)" Condition="Exists($(_MonoRuntimeFilePath))" />
<_MonoRuntimeComponentsSharedFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix)" Condition="Exists($(_MonoRuntimeFilePath))" />
<_MonoRuntimeComponentsSharedFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix).dwarf" Condition="Exists('$(_MonoRuntimeFilePath).dwarf')" />
<_MonoRuntimeComponentsSharedFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix).dbg" Condition="Exists('$(_MonoRuntimeFilePath).dbg')" />
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeFilePath)" Condition="Exists($(_MonoRuntimeFilePath))">
<Destination>$(RuntimeBinDir)$(MonoFileName)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeFilePath).dbg" Condition="Exists('$(_MonoRuntimeFilePath).dbg')">
<Destination>$(RuntimeBinDir)$(MonoFileName).dbg</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeFilePath).dwarf" Condition="Exists('$(_MonoRuntimeFilePath).dwarf')">
<Destination>$(RuntimeBinDir)$(MonoFileName).dwarf</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeStaticFilePath)" Condition="Exists($(_MonoRuntimeStaticFilePath)) and '$(_MonoRuntimeStaticFilePath)' != '$(_MonoRuntimeFilePath)'">
<Destination>$(RuntimeBinDir)$(MonoStaticLibFileName)</Destination>
</_MonoRuntimeArtifacts>
<!-- copy the mono runtime component shared or static libraries -->
<_MonoRuntimeArtifacts Include="@(_MonoRuntimeComponentsStaticFilePath)">
<Destination>$(RuntimeBinDir)%(_MonoRuntimeComponentsStaticFilePath.Filename)%(_MonoRuntimeComponentsStaticFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="@(_MonoRuntimeComponentsSharedFilePath)">
<Destination>$(RuntimeBinDir)%(_MonoRuntimeComponentsSharedFilePath.Filename)%(_MonoRuntimeComponentsSharedFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoAotCrossFilePath)">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\$(MonoAotCrossFileName)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoAotCrossPdbFilePath)" Condition="Exists('$(_MonoAotCrossPdbFilePath)')">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\$(MonoAotCrossPdbFileName)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoBundleLLVMOptimizer)' == 'true'" Include="$(MonoLLVMDir)\bin\llc$(ExeSuffix)">
<Destination>$(RuntimeBinDir)\llc$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoBundleLLVMOptimizer)' == 'true'" Include="$(MonoLLVMDir)\bin\opt$(ExeSuffix)">
<Destination>$(RuntimeBinDir)\opt$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoAOTBundleLLVMOptimizer)' == 'true'" Include="$(MonoAOTLLVMDir)\bin\llc$(ExeSuffix)">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\llc$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoAOTBundleLLVMOptimizer)' == 'true'" Include="$(MonoAOTLLVMDir)\bin\opt$(ExeSuffix)">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\opt$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoIncludeArtifacts Include="$(MonoObjDir)out\include\**" />
<_MonoRuntimeArtifacts Condition="'$(MonoComponentsStatic)' != 'true' and Exists('$(MonoObjDir)out\lib\Mono.release.framework')" Include="@(_MonoRuntimeComponentsSharedFilePath)">
<Destination>$(RuntimeBinDir)\Mono.release.framework\%(_MonoRuntimeComponentsSharedFilePath.Filename)%(_MonoRuntimeComponentsSharedFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and !Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Mono.release">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Versions\Current\Mono.release">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Mono.release.dwarf">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Mono.dwarf</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoComponentsStatic)' != 'true' and Exists('$(MonoObjDir)out\lib\Mono.debug.framework')" Include="@(_MonoRuntimeComponentsSharedFilePath)">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\%(_MonoRuntimeComponentsSharedFilePath.Filename)%(_MonoRuntimeComponentsSharedFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and !Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Mono.debug">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Versions\Current\Mono.debug">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Mono.debug.dwarf">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Mono.dwarf</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and !Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Versions\Current\Resources\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and !Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Versions\Current\Resources\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeBuildArtifacts Include="$(MonoObjDir)\build\**" />
<_MonoRuntimeArtifacts Condition="'$(_MonoIncludeInterpStaticFiles)' == 'true'" Include="$(MonoObjDir)out\lib\libmono-ee-interp.a">
<Destination>$(RuntimeBinDir)libmono-ee-interp.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(_MonoIncludeInterpStaticFiles)' == 'true'" Include="$(MonoObjDir)out\lib\libmono-icall-table.a">
<Destination>$(RuntimeBinDir)libmono-icall-table.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(_MonoIncludeInterpStaticFiles)' == 'true'" Include="$(MonoObjDir)out\lib\libmono-ilgen.a">
<Destination>$(RuntimeBinDir)libmono-ilgen.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(TargetsBrowser)' == 'true' and '$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Include="$(MonoObjDir)out\lib\libmono-profiler-aot.a">
<Destination>$(RuntimeBinDir)libmono-profiler-aot.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoICorDebugArtifacts Condition="'$(MonoMsCorDbi)' == 'true'" Include="$(MonoObjDir)out\lib\$(LibPrefix)dbgshim$(LibSuffix)">
<Destination>$(RuntimeBinDir)$(LibPrefix)dbgshim$(LibSuffix)</Destination>
</_MonoICorDebugArtifacts>
<_MonoICorDebugArtifacts Condition="'$(MonoMsCorDbi)' == 'true'" Include="$(MonoObjDir)out\lib\$(LibPrefix)mscordbi$(LibSuffix)">
<Destination>$(RuntimeBinDir)$(LibPrefix)mscordbi$(LibSuffix)</Destination>
</_MonoICorDebugArtifacts>
<_IcuArtifacts Condition="'$(_MonoIncludeIcuFiles)' == 'true'"
Include="$(_IcuLibdir)\libicuuc.a;
$(_IcuLibdir)\libicui18n.a;
$(_IcuLibdir)\libicudata.a;
$(_IcuLibdir)\*.dat" />
</ItemGroup>
<Copy Condition="'$(_MonoIncludeIcuFiles)' == 'true'"
SourceFiles="@(_IcuArtifacts)"
DestinationFolder="$(RuntimeBinDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(_MonoRuntimeArtifacts)"
DestinationFiles="%(_MonoRuntimeArtifacts.Destination)"
Condition="'$(MonoGenerateOffsetsOSGroups)' == ''"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(_MonoICorDebugArtifacts)"
DestinationFiles="%(_MonoICorDebugArtifacts.Destination)"
SkipUnchangedFiles="true"
Condition="Exists(@(_MonoICorDebugArtifacts))" />
<Copy SourceFiles="@(_MonoIncludeArtifacts)"
DestinationFiles="@(_MonoIncludeArtifacts->'$(RuntimeBinDir)include\%(RecursiveDir)%(Filename)%(Extension)')"
SkipUnchangedFiles="true"
Condition="'$(MonoGenerateOffsetsOSGroups)' == '' and ('$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsAndroid)' == 'true' or '$(TargetsBrowser)' == 'true')"/>
<Copy SourceFiles="@(_MonoRuntimeBuildArtifacts)"
DestinationFiles="@(_MonoRuntimeBuildArtifacts->'$(RuntimeBinDir)build\%(RecursiveDir)%(Filename)%(Extension)')"
SkipUnchangedFiles="true"
Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'" />
<Exec Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true' and '$(MonoGenerateOffsetsOSGroups)' == '' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true')" Command="install_name_tool -id @rpath/$(MonoFileName) $(RuntimeBinDir)$(MonoFileName)" />
</Target>
<Target Name="CleanMono">
<RemoveDir Directories="$(MonoObjDir)" />
</Target>
</Project>
<Project Sdk="Microsoft.Build.Traversal" DefaultTargets="Build">
<!--
Build properties:
- MonoForceInterpreter - enable the interpreter
- MonoEnableLLVM - enable LLVM
- MonoLLVMDir - [optional] the directory where LLVM is located
- MonoAOTEnableLLVM - enable LLVM for an AOT-only Mono
- MonoAOTLLVMDir - [optional] the directory where LLVM is located, for an AOT-only Mono
- MonoVerboseBuild - enable verbose build
- MonoThreadSuspend - coop,hybrid,preemptive - default thread suspend mode
-->
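  <!--
    Illustrative usage (a sketch, not a canonical invocation; the exact entry-point script and
    accepted switches may differ): these properties are normally passed on the build command line, e.g.

      ./build.sh -subset mono -c Release /p:MonoEnableLLVM=true /p:MonoThreadSuspend=hybrid /p:MonoVerboseBuild=true

    Properties not set explicitly fall back to the defaults computed below.
  -->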
<PropertyGroup>
<MonoCrossDir Condition="'$(MonoCrossDir)' == '' and '$(ROOTFS_DIR)' != ''">$(ROOTFS_DIR)</MonoCrossDir>
<MonoForceInterpreter Condition="'$(MonoForceInterpreter)' == ''">false</MonoForceInterpreter>
<ScriptExt Condition="'$(HostOS)' == 'windows'">.cmd</ScriptExt>
<ScriptExt Condition="'$(HostOS)' != 'windows'">.sh</ScriptExt>
<EscapedQuoteW Condition="'$(HostOS)' == 'windows'">\"</EscapedQuoteW>
<PythonCmd Condition="'$(HostOS)' != 'windows'">python3</PythonCmd>
<PythonCmd Condition="'$(HostOS)' == 'windows'">python</PythonCmd>
<CoreClrLibName>coreclr</CoreClrLibName>
<CoreClrFileName>$(LibPrefix)$(CoreClrLibName)$(LibSuffix)</CoreClrFileName>
<MonoLibName>monosgen-2.0</MonoLibName>
<MonoSharedLibName Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsAndroid)' == 'true' or '$(TargetsBrowser)' == 'true'">$(MonoLibName)</MonoSharedLibName>
<MonoSharedLibName Condition="'$(MonoSharedLibName)' == ''">$(CoreClrLibName)</MonoSharedLibName>
<MonoSharedLibFileName>$(LibPrefix)$(MonoSharedLibName)$(LibSuffix)</MonoSharedLibFileName>
<MonoStaticLibFileName>$(LibPrefix)$(MonoLibName)$(StaticLibSuffix)</MonoStaticLibFileName>
<MonoFileName Condition="'$(TargetsBrowser)' == 'true'">$(MonoStaticLibFileName)</MonoFileName>
<MonoFileName Condition="'$(MonoFileName)' == ''">$(MonoSharedLibFileName)</MonoFileName>
<MonoAotCrossFileName>mono-aot-cross$(ExeSuffix)</MonoAotCrossFileName>
<MonoAotCrossPdbFileName>mono-aot-cross.pdb</MonoAotCrossPdbFileName>
<CoreClrTestConfig Condition="'$(CoreClrTestConfig)' == ''">$(Configuration)</CoreClrTestConfig>
<LibrariesTestConfig Condition="'$(LibrariesTestConfig)' == ''">$(Configuration)</LibrariesTestConfig>
<CoreClrTestCoreRoot>$([MSBuild]::NormalizeDirectory('$(ArtifactsDir)', 'tests', 'coreclr', '$(TargetOS).$(Platform).$(CoreClrTestConfig)', 'Tests', 'Core_Root'))</CoreClrTestCoreRoot>
<LibrariesTesthostRoot>$([MSBuild]::NormalizeDirectory('$(ArtifactsDir)', 'bin', 'testhost', '$(NetCoreAppCurrent)-$(TargetOS)-$(LibrariesTestConfig)-$(Platform)'))</LibrariesTesthostRoot>
<LibrariesTesthostRuntimeDir>$([MSBuild]::NormalizeDirectory('$(LibrariesTesthostRoot)', 'shared', 'Microsoft.NETCore.App', '$(ProductVersion)'))</LibrariesTesthostRuntimeDir>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsiOS)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetstvOS)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsMacCatalyst)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsBrowser)' == 'true'">true</BuildMonoAOTCrossCompiler>
<BuildMonoAOTCrossCompiler Condition="'$(TargetsAndroid)' == 'true'">true</BuildMonoAOTCrossCompiler>
<MonoObjCrossDir>$([MSBuild]::NormalizeDirectory('$(MonoObjDir)', 'cross'))</MonoObjCrossDir>
<CrossConfigH Condition="'$(BuildMonoAOTCrossCompiler)' == 'true'">$([MSBuild]::NormalizePath('$(MonoObjCrossDir)', 'config.h'))</CrossConfigH>
<MonoBundleLLVMOptimizer Condition="'$(MonoEnableLLVM)' == 'true'">true</MonoBundleLLVMOptimizer>
<MonoAOTBundleLLVMOptimizer Condition="'$(MonoAOTEnableLLVM)' == 'true' and '$(TargetsBrowser)' != 'true'">true</MonoAOTBundleLLVMOptimizer>
<MonoCCompiler>$(Compiler)</MonoCCompiler>
<MonoCCompiler Condition="'$(MonoCCompiler)' == ''">clang</MonoCCompiler>
<_CompilerTargetArch Condition="'$(RealTargetArchitecture)' == ''">$(Platform)</_CompilerTargetArch>
<_CompilerTargetArch Condition="'$(RealTargetArchitecture)' != ''">$(RealTargetArchitecture)</_CompilerTargetArch>
<RepositoryEngineeringCommonDir>$([MSBuild]::NormalizeDirectory('$(RepositoryEngineeringDir)', 'common'))</RepositoryEngineeringCommonDir>
<CrossToolchainFile>$([MSBuild]::NormalizePath('$(RepositoryEngineeringCommonDir)', 'cross', 'toolchain.cmake'))</CrossToolchainFile>
</PropertyGroup>
<!-- default thread suspend for specific platforms -->
<PropertyGroup>
<MonoThreadSuspend Condition="'$(TargetswatchOS)' == 'true' and '$(MonoThreadSuspend)' == ''">coop</MonoThreadSuspend>
<!-- wasm isn't really preemptive, but we don't want safepoints -->
<MonoThreadSuspend Condition="'$(TargetsBrowser)' == 'true' and '$(MonoThreadSuspend)' == ''">preemptive</MonoThreadSuspend>
<!-- all other platforms -->
<MonoThreadSuspend Condition="'$(MonoThreadSuspend)' == ''">hybrid</MonoThreadSuspend>
</PropertyGroup>
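  <!-- Illustrative override (assumption about typical usage): passing /p:MonoThreadSuspend=coop on the
       build command line forces cooperative suspend on a platform that would otherwise default to hybrid. -->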
<!-- How to build runtime components? Static or dynamic. -->
<PropertyGroup>
<MonoComponentsStatic Condition="'$(TargetsBrowser)' == 'true' and '$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true' and '$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true' and '$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true' and '$(MonoComponentsStatic)' == ''">false</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true' and '$(MonoComponentsStatic)' == ''">false</MonoComponentsStatic>
<MonoComponentsStatic Condition="'$(TargetsAndroid)' == 'true' and '$(MonoComponentsStatic)' == ''">false</MonoComponentsStatic>
<!-- by default, do dynamic components -->
<!-- TODO: Change to dynamic as default once package/deploy is fixed for all targets -->
<MonoComponentsStatic Condition="'$(MonoComponentsStatic)' == ''">true</MonoComponentsStatic>
</PropertyGroup>
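  <!-- Illustrative override (not prescriptive): /p:MonoComponentsStatic=false requests dynamically
       loadable component libraries (libmono-component-*) instead of statically linked components,
       where the target platform supports it. -->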
<ItemGroup Condition="'$(TargetsBrowser)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">
<PackageReference Include="Microsoft.NETCore.Runtime.ICU.Transport" PrivateAssets="all" Version="$(MicrosoftNETCoreRuntimeICUTransportVersion)" GeneratePathProperty="true" />
</ItemGroup>
<!-- CI specific build options -->
<ItemGroup Condition="'$(ContinuousIntegrationBuild)' == 'true' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsBrowser)' == 'true' or '$(Targetsillumos)' == 'true')">
<_MonoCMakeArgs Include="-DENABLE_WERROR=1"/>
</ItemGroup>
<!-- Sanity checks -->
<Target Name="CheckEnv">
<Error Condition="'$(TargetstvOSSimulator)' != 'true' and '$(TargetstvOS)' == 'true' and '$(Platform)' != 'arm64'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetstvOSSimulator)' == 'true' and '$(TargetstvOS)' == 'true' and '$(Platform)' != 'x64' and '$(Platform)' != 'arm64'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetsiOSSimulator)' != 'true' and '$(TargetsiOS)' == 'true' and '$(Platform)' != 'arm64' and '$(Platform)' != 'arm'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetsiOSSimulator)' == 'true' and '$(TargetsiOS)' == 'true' and '$(Platform)' != 'x64' and '$(Platform)' != 'x86' and '$(Platform)' != 'arm64'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="('$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true') and !$([MSBuild]::IsOSPlatform('OSX'))" Text="Error: $(TargetOS) can only be built on macOS." />
<Error Condition="'$(TargetsAndroid)' == 'true' and '$(Platform)' != 'x64' and '$(Platform)' != 'x86' and '$(Platform)' != 'arm64' and '$(Platform)' != 'arm'" Text="Error: Invalid platform for $(TargetOS): $(Platform)." />
<Error Condition="'$(TargetsBrowser)' == 'true' and '$(EMSDK_PATH)' == '' and '$(SkipMonoCrossJitConfigure)' != 'true'" Text="The EMSDK_PATH environment variable should be set pointing to the emscripten SDK root dir."/>
<Error Condition="'$(TargetsAndroid)' == 'true' and '$(ANDROID_NDK_ROOT)' == '' and '$(SkipMonoCrossJitConfigure)' != 'true'" Text="Error: You need to set the ANDROID_NDK_ROOT environment variable pointing to the Android NDK root." />
<Error Condition="'$(HostOS)' == 'windows' and ('$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true')" Text="Error: Mono runtime for $(TargetOS) can't be built on Windows." />
<!-- check if Ninja is available and default to it on Unix platforms -->
<Exec Condition="'$(HostOS)' != 'windows' and '$(Ninja)' == ''" Command="which ninja" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" >
<Output TaskParameter="ExitCode" PropertyName="_MonoFindNinjaExitCode"/>
</Exec>
<PropertyGroup>
<_MonoUseNinja Condition="'$(Ninja)' == 'true' or '$(_MonoFindNinjaExitCode)' == '0' or ('$(HostOS)' == 'windows' and '$(Ninja)' == '')">true</_MonoUseNinja>
</PropertyGroup>
<Exec Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows'" Command="call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && cmake --version" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" >
<Output TaskParameter="ExitCode" PropertyName="_MonoFindCmakeExitCode"/>
</Exec>
<Error Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows' and '$(_MonoFindCmakeExitCode)' != '0' and '$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Text="cmake tool is required to build wasm on windows" />
<Exec Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows'" Command="call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && ninja --version" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" >
<Output TaskParameter="ExitCode" PropertyName="_MonoFindNinjaExitCode"/>
</Exec>
<Error Condition="'$(TargetArchitecture)' == 'wasm' and '$(HostOS)' == 'windows' and '$(_MonoFindNinjaExitCode)' != '0' and '$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Text="ninja tool is required to build wasm on windows" />
</Target>
<Target Name="GetXcodeDir" Condition="$([MSBuild]::IsOSPlatform('OSX')) and '$(XcodeDir)' == ''">
<Exec Command="xcode-select -p" IgnoreExitCode="true" IgnoreStandardErrorWarningFormat="true" StandardOutputImportance="Low" ConsoleToMsBuild="true">
<Output TaskParameter="ExitCode" PropertyName="_MonoGetXcodeExitCode"/>
<Output TaskParameter="ConsoleOutput" PropertyName="_MonoGetXcodeConsoleOutput"/>
</Exec>
<PropertyGroup>
<XcodeDir Condition="'$(_MonoGetXcodeExitCode)' == '0'">$(_MonoGetXcodeConsoleOutput)</XcodeDir>
<XcodeDir Condition="'$(XcodeDir)' == ''">/Applications/Xcode.app/Contents/Developer</XcodeDir>
</PropertyGroup>
</Target>
<!-- Sets up emscripten if you don't have the EMSDK_PATH env variable set -->
<Target Name="ProvisionEmscripten"
Condition="'$(ShouldProvisionEmscripten)' == 'true' and '$(SkipMonoCrossJitConfigure)' != 'true'">
<ReadLinesFromFile File="$(MSBuildThisFileDirectory)\wasm\emscripten-version.txt">
<Output TaskParameter="Lines" ItemName="_VersionLines" />
</ReadLinesFromFile>
<PropertyGroup>
<EmsdkExt Condition="'$(HostOS)' != 'windows'">.sh</EmsdkExt>
<EmsdkExt Condition="'$(HostOS)' == 'windows'">.ps1</EmsdkExt>
<EMSDK_PATH>$(ProvisionEmscriptenDir)</EMSDK_PATH>
<WasmLocalPath>$([MSBuild]::NormalizeDirectory('$(MSBuildThisFileDirectory)', 'wasm'))</WasmLocalPath>
<EmsdkLocalPath>emsdk</EmsdkLocalPath>
<EmscriptenVersion>%(_VersionLines.Identity)</EmscriptenVersion>
<InstallCmd>./emsdk$(EmsdkExt) install $(EmscriptenVersion)</InstallCmd>
<ActivateCmd>./emsdk$(EmsdkExt) activate $(EmscriptenVersion)</ActivateCmd>
<InstallCmd Condition="'$(HostOS)' == 'windows'">powershell -NonInteractive -command "& $(InstallCmd); Exit $LastExitCode "</InstallCmd>
<ActivateCmd Condition="'$(HostOS)' == 'windows'">powershell -NonInteractive -command "& $(ActivateCmd); Exit $LastExitCode "</ActivateCmd>
</PropertyGroup>
<RemoveDir Directories="$(EMSDK_PATH)" />
<Exec Command="git clone https://github.com/emscripten-core/emsdk.git emsdk"
WorkingDirectory="$(WasmLocalPath)"
IgnoreStandardErrorWarningFormat="true" />
<Exec Command="git checkout $(EmscriptenVersion) && $(InstallCmd) && $(ActivateCmd)"
WorkingDirectory="$(EMSDK_PATH)"
IgnoreStandardErrorWarningFormat="true" />
</Target>
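  <!-- Illustrative note (assumption about typical usage): provisioning is requested by setting
       ShouldProvisionEmscripten=true and ProvisionEmscriptenDir; the target then clones emsdk under the
       'wasm' directory next to this project file and installs/activates the version pinned in
       wasm/emscripten-version.txt. -->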
<!-- Copy Mono runtime bits to $(Destination) -->
<Target Name="CopyMonoRuntimeFilesFromArtifactsToDestination">
<ItemGroup>
<_MonoRuntimeArtifacts Include="$(RuntimeBinDir)\*.*" />
</ItemGroup>
<Error Condition="'$(Destination)' == ''" Text="Destination should not be empty" />
<Error Condition="@(_MonoRuntimeArtifacts->Count()) < 2" Text="Mono artifacts were not found at $(RuntimeBinDir)" />
<Message Text="Copying Mono Runtime artifacts from '$(RuntimeBinDir)' to '$(Destination)'.'" Importance="High" />
<Copy SourceFiles="@(_MonoRuntimeArtifacts)"
DestinationFolder="$(Destination)"
OverwriteReadOnlyFiles="true"
SkipUnchangedFiles="true" />
</Target>
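  <!-- Illustrative invocation (a sketch; the exact msbuild entry point and project path may vary):
         dotnet build mono.proj /t:CopyMonoRuntimeFilesFromArtifactsToDestination /p:Destination=/tmp/mono-runtime
       copies everything under $(RuntimeBinDir) into the given Destination directory. -->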
<!-- Copy Mono runtime bits to the coreclr's Core_Root in order to run runtime tests -->
<Target Name="PatchCoreClrCoreRoot">
<MSBuild Projects="$(MSBuildProjectFullPath)"
Properties="Destination=$(CoreClrTestCoreRoot)"
Targets="CopyMonoRuntimeFilesFromArtifactsToDestination" />
</Target>
<!-- Copy Coreclr runtime bits back to Core_Root -->
<Target Name="RestoreCoreClrCoreRoot">
<Copy SourceFiles="$(CoreCLRArtifactsPath)\System.Private.CoreLib.dll"
DestinationFiles="$(CoreClrTestCoreRoot)\System.Private.CoreLib.dll" />
<Copy SourceFiles="$(CoreCLRArtifactsPath)\$(CoreClrFileName)"
DestinationFiles="$(CoreClrTestCoreRoot)\$(CoreClrFileName)" />
</Target>
<!-- Run CoreCLR runtime test using testhost -->
<Target Name="RunCoreClrTest" DependsOnTargets="PatchCoreClrCoreRoot">
<Error Condition="$(CoreClrTest) == ''" Text="'CoreClrTest' is not set. E.g. set it to `$(ArtifactsDir)tests/coreclr/$(TargetOS).$(Platform).$(CoreClrTestConfig)/JIT/opt/InstructionCombining/DivToMul/DivToMul$(ScriptExt)` in order to run DivToMul test." />
<Exec Command="$(CoreClrTest) -coreroot="$(CoreClrTestCoreRoot)""/>
</Target>
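  <!-- Illustrative invocation (assumption about typical usage):
         dotnet build mono.proj /t:RunCoreClrTest /p:CoreClrTest=<path-to-generated-test-script>
       first patches Core_Root with the Mono runtime bits and then runs the single test script. -->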
<!-- Run coreclr tests using runtest.py -->
<Target Name="RunCoreClrTests" DependsOnTargets="PatchCoreClrCoreRoot">
<Exec Condition="'$(HostOS)' == 'windows'" Command="$(MonoProjectRoot)..\tests\run.cmd $(CoreClrTestConfig)" ContinueOnError="ErrorAndContinue" />
<Exec Condition="'$(HostOS)' != 'windows'" Command="$(MonoProjectRoot)../tests/run.sh $(CoreClrTestConfig)" ContinueOnError="ErrorAndContinue" />
</Target>
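  <!-- Same caveats as above: the RunCoreClrTests target patches Core_Root and then drives the full
       coreclr test run via run.cmd/run.sh with $(CoreClrTestConfig). -->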
<!-- Mono runtime build -->
<Target Name="BuildMonoRuntime">
<ItemGroup>
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' == 'true'" Include="-G Ninja"/>
<_MonoCMakeArgs Include="-DCMAKE_INSTALL_PREFIX="$(MonoObjDir)out""/>
<_MonoCMakeArgs Include="-DCMAKE_INSTALL_LIBDIR=lib"/>
<_MonoCMakeArgs Include="-DCMAKE_BUILD_TYPE=$(Configuration)"/>
<_MonoCMakeArgs Condition="'$(CMakeArgs)' != ''" Include="$(CMakeArgs)"/>
<_MonoCMakeArgs Condition="'$(MonoEnableLLVM)' == 'true'" Include="-DLLVM_PREFIX=$(MonoLLVMDir.TrimEnd('\/'))" />
<_MonoCMakeArgs Condition="'$(BuildDarwinFrameworks)' == 'true'" Include="-DBUILD_DARWIN_FRAMEWORKS=1" />
<_MonoCMakeArgs Include="-DGC_SUSPEND=$(MonoThreadSuspend)" />
<_MonoCMakeArgs Include="-DMONO_LIB_NAME=$(MonoLibName)" />
<_MonoCMakeArgs Include="-DMONO_SHARED_LIB_NAME=$(MonoSharedLibName)" />
</ItemGroup>
<!-- We build LLVM bits for x64 Linux without C++11 ABI (CentOS 7 has libstdc++ < 5.1) -->
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoEnableLLVM)' == 'true' and '$(MonoLLVMUseCxx11Abi)' != 'true'">
<_MonoCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=0" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoAOTEnableLLVM)' == 'true' and '$(MonoAOTLLVMUseCxx11Abi)' != 'true'">
<_MonoAOTCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=0" />
</ItemGroup>
    <!-- We build LLVM bits for ARM64 Linux with C++11 ABI (Ubuntu 16.04 has libstdc++ > 5.1) -->
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoEnableLLVM)' == 'true' and '$(MonoLLVMUseCxx11Abi)' == 'true'">
<_MonoCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=1" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsLinux)' == 'true' and '$(MonoAOTEnableLLVM)' == 'true' and '$(MonoAOTLLVMUseCxx11Abi)' == 'true'">
<_MonoAOTCXXFLAGS Include="-D_GLIBCXX_USE_CXX11_ABI=1" />
</ItemGroup>
<!-- ARM Linux cross build options on CI -->
<ItemGroup Condition="'$(TargetsAndroid)' != 'true' and '$(MonoCrossDir)' != '' and ('$(TargetArchitecture)' == 'arm' or '$(TargetArchitecture)' == 'armv6' or '$(TargetArchitecture)' == 'arm64')">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoCMakeArgs Condition="'$(TargetOS)' == 'Linux' and ('$(TargetArchitecture)' == 'arm' or '$(TargetArchitecture)' == 'armv6')" Include="-DMONO_ARM_FPU=vfp-hard" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm64'" Include="TARGET_BUILD_ARCH=arm64" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm'" Include="TARGET_BUILD_ARCH=arm" />
<_MonoBuildEnv Condition="'$(Platform)' == 'armv6'" Include="TARGET_BUILD_ARCH=armv6" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm64'" Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/aarch64-linux-gnu/pkgconfig" />
<_MonoBuildEnv Condition="'$(Platform)' == 'arm'" Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/arm-linux-gnueabihf/pkgconfig" />
<_MonoBuildEnv Condition="'$(Platform)' == 'armv6'" Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/arm-linux-gnueabihf/pkgconfig" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-march=armv6zk" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mcpu=arm1176jzf-s" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfpu=vfp" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfloat-abi=hard" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-march=armv6zk" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mcpu=arm1176jzf-s" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfpu=vfp" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'armv6'" Include="-mfloat-abi=hard" />
</ItemGroup>
<!-- x64 illumos cross build options -->
<ItemGroup Condition="'$(Targetsillumos)' == 'true' and '$(MonoCrossDir)' != ''">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoBuildEnv Include="TARGET_BUILD_ARCH=x64" />
<_MonoBuildEnv Include="PKG_CONFIG_PATH=$(MonoCrossDir)/lib/pkgconfig" />
</ItemGroup>
<!-- s390x Linux cross build options -->
<ItemGroup Condition="'$(MonoCrossDir)' != '' and '$(TargetArchitecture)' == 's390x'">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoBuildEnv Include="TARGET_BUILD_ARCH=s390x" />
<_MonoBuildEnv Include="PKG_CONFIG_PATH=$(MonoCrossDir)/usr/lib/s390x-linux-gnu/pkgconfig" />
</ItemGroup>
<!-- x64 FreeBSD cross build options -->
<ItemGroup Condition="'$(TargetsFreeBSD)' == 'true' and '$(MonoCrossDir)' != ''">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(CrossToolchainFile)" />
<_MonoBuildEnv Include="TARGET_BUILD_ARCH=x64" />
</ItemGroup>
<!-- Windows specific options -->
<ItemGroup Condition="'$(TargetsWindows)' == 'true'">
<_MonoCPPFLAGS Include="-DWIN32" />
<_MonoCPPFLAGS Include="-DWIN32_LEAN_AND_MEAN" />
<!--<_MonoCPPFLAGS Include="-D_WINDOWS" />--> <!-- set in monow.vcxproj, not sure we really need it -->
<_MonoCPPFLAGS Condition="'$(Platform)' == 'x64' or '$(Platform)' == 'arm64'" Include="-DWIN64" />
<_MonoCPPFLAGS Condition="'$(Configuration)' == 'Release'" Include="-DNDEBUG" />
<_MonoCPPFLAGS Condition="'$(Configuration)' == 'Debug'" Include="-D_DEBUG" />
<!-- <_MonoCPPFLAGS Include="-D__default_codegen__" /> --> <!-- doesn't seem to be used -->
<_MonoCPPFLAGS Include="-D_CRT_SECURE_NO_WARNINGS" />
<_MonoCPPFLAGS Include="-D_CRT_NONSTDC_NO_DEPRECATE" />
<!--<_MonoCPPFLAGS Include="-DGC_NOT_DLL" />--> <!-- only used for Boehm -->
<_MonoCPPFLAGS Include="-DWIN32_THREADS" />
<_MonoCPPFLAGS Include="-DWINVER=0x0601" />
<_MonoCPPFLAGS Include="-D_WIN32_WINNT=0x0601" />
<_MonoCPPFLAGS Include="-D_WIN32_IE=0x0501" />
<_MonoCPPFLAGS Include="-D_UNICODE" />
<_MonoCPPFLAGS Include="-DUNICODE" />
<_MonoCPPFLAGS Include="-DFD_SETSIZE=1024" />
<_MonoCPPFLAGS Include="-DNVALGRIND" />
<!-- Select generator platform for VS generator -->
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x64'" Include="-A x64" />
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x86'" Include="-A Win32" />
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm'" Include="-A ARM" />
<_MonoCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm64'" Include="-A ARM64" />
</ItemGroup>
<!-- OSX specific options -->
<ItemGroup Condition="'$(TargetsOSX)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_OSX_DEPLOYMENT_TARGET=$(macOSVersionMin)" />
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
<!-- Force running as arm64 even when invoked from an x86 msbuild process -->
<_MonoBuildEnv Condition="'$(BuildArchitecture)' == 'arm64'" Include="arch -arch arm64" />
</ItemGroup>
<!-- Mac Catalyst specific options -->
<ItemGroup Condition="'$(TargetsMacCatalyst)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_SYSTEM_VARIANT=MacCatalyst" />
<!-- https://gitlab.kitware.com/cmake/cmake/-/issues/20132 -->
<_MonoCPPFLAGS Include="-Wno-overriding-t-option" />
<_MonoCFlags Condition="'$(TargetArchitecture)' == 'arm64'" Include="-target arm64-apple-ios14.2-macabi" />
<_MonoCFlags Condition="'$(TargetArchitecture)' == 'x64'" Include="-target x86_64-apple-ios13.5-macabi" />
<_MonoCFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
<_MonoCXXFlags Condition="'$(TargetArchitecture)' == 'arm64'" Include="-target arm64-apple-ios14.2-macabi" />
<_MonoCXXFlags Condition="'$(TargetArchitecture)' == 'x64'" Include="-target x86_64-apple-ios13.5-macabi" />
<_MonoCXXFLAGS Condition="'$(TargetArchitecture)' == 'arm64'" Include="-arch arm64" />
<!-- Force running as arm64 even when invoked from an x86 msbuild process -->
<_MonoBuildEnv Condition="'$(BuildArchitecture)' == 'arm64'" Include="arch -arch arm64" />
</ItemGroup>
<!-- WASM specific options -->
<PropertyGroup Condition="'$(TargetsBrowser)' == 'true'">
<_MonoMinimal Condition="'$(Configuration)' == 'Release'">,debugger_agent,log_dest</_MonoMinimal>
<_MonoMinimal Condition="'$(Configuration)' == 'Release' and '$(MonoEnableAssertMessages)' != 'true'">$(_MonoMinimal),assert_messages</_MonoMinimal>
</PropertyGroup>
<ItemGroup Condition="'$(TargetsBrowser)' == 'true'">
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=jit,sgen_major_marksweep_conc,sgen_split_nursery,sgen_gc_bridge,sgen_toggleref,sgen_debug_helpers,sgen_binary_protocol,logging,shared_perfcounters,interpreter,threads,qcalls$(_MonoMinimal)"/>
<_MonoCMakeArgs Include="-DENABLE_INTERP_LIB=1"/>
<_MonoCMakeArgs Include="-DDISABLE_ICALL_TABLES=1"/>
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCMakeArgs Include="-DENABLE_LAZY_GC_THREAD_CREATION=1"/>
<_MonoCMakeArgs Include="-DENABLE_LLVM_RUNTIME=1"/>
<_MonoCFLAGS Include="-fexceptions"/>
<_MonoCXXFLAGS Include="-fexceptions"/>
<_MonoCFLAGS Include="$(EscapedQuoteW)-I$([MSBuild]::NormalizePath('$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)', 'runtimes', 'browser-wasm', 'native', 'include'))$(EscapedQuoteW)"/>
</ItemGroup>
<!-- iOS/tvOS specific options -->
<PropertyGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'">
<_MonoCCOption>CC="$(XcodeDir)/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang"</_MonoCCOption>
<_MonoCXXOption>CXX="$(XcodeDir)/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang++"</_MonoCXXOption>
<_MonoRunInitCompiler>false</_MonoRunInitCompiler>
<_MonoCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true'">$(XcodeDir)/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$(iOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true'">$(XcodeDir)/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator$(iOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true'">$(XcodeDir)/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS$(tvOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true'">$(XcodeDir)/Platforms/AppleTVSimulator.platform/Developer/SDKs/AppleTVSimulator$(tvOSVersion).sdk</_MonoCMakeSysroot>
<_MonoCMakeSystemName Condition="'$(TargetsiOS)' == 'true'">iOS</_MonoCMakeSystemName>
<_MonoCMakeSystemName Condition="'$(TargetstvOS)' == 'true'">tvOS</_MonoCMakeSystemName>
<_MonoCMakeVersionMin Condition="'$(TargetsiOS)' == 'true'">$(iOSVersionMin)</_MonoCMakeVersionMin>
<_MonoCMakeVersionMin Condition="'$(TargetstvOS)' == 'true'">$(tvOSVersionMin)</_MonoCMakeVersionMin>
</PropertyGroup>
<PropertyGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">
<_IcuLibdir>$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)/runtimes/$(TargetOS)-$(TargetArchitecture)/native/lib</_IcuLibdir>
</PropertyGroup>
<ItemGroup Condition="('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true') and '$(Platform)' == 'arm64'">
<_MonoCMakeArgs Include="-DCMAKE_OSX_ARCHITECTURES=arm64"/>
</ItemGroup>
<ItemGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_SYSTEM_NAME=$(_MonoCMakeSystemName)"/>
<_MonoCMakeArgs Include="-DCMAKE_OSX_DEPLOYMENT_TARGET=$(_MonoCMakeVersionMin)" />
<_MonoCMakeArgs Include="-DCMAKE_OSX_SYSROOT='$(_MonoCMakeSysroot)'" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'x64'" Include="-DCMAKE_OSX_ARCHITECTURES=x86_64"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'x86'" Include="-DCMAKE_OSX_ARCHITECTURES=i386"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm64'" Include="-DCMAKE_OSX_ARCHITECTURES=arm64"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm'" Include=""-DCMAKE_OSX_ARCHITECTURES=armv7%3Barmv7s""/>
<_MonoCFLAGS Include="-Wl,-application_extension" />
<_MonoCXXFLAGS Include="-Wl,-application_extension" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">
<_MonoCMakeArgs Include="-DICU_LIBDIR=$(_IcuLibdir)"/>
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCFLAGS Include="-I$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)/runtimes/$(TargetOS)-$(TargetArchitecture)/native/include" />
</ItemGroup>
<!-- iOS/tvOS simulator specific options -->
<ItemGroup Condition="('$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true') or ('$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true')">
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=shared_perfcounters"/>
</ItemGroup>
<!-- iOS/tvOS device specific options -->
<ItemGroup Condition="('$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true') or ('$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true')">
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=jit,logging,shared_perfcounters" />
<_MonoCMakeArgs Include="-DENABLE_VISIBILITY_HIDDEN=1"/>
<_MonoCMakeArgs Include="-DENABLE_LAZY_GC_THREAD_CREATION=1"/>
<_MonoCMakeArgs Include="-DENABLE_SIGALTSTACK=0"/>
<_MonoCMakeArgs Include="-DENABLE_ICALL_EXPORT=1"/>
<_MonoCFLAGS Include="-Werror=partial-availability" />
<_MonoCFLAGS Condition="'$(TargetstvOS)' == 'true'" Include="-fno-gnu-inline-asm" />
<_MonoCFLAGS Include="-fexceptions" />
<_MonoCPPFLAGS Include="-DSMALL_CONFIG" />
<_MonoCPPFLAGS Include="-D_XOPEN_SOURCE" />
<_MonoCPPFLAGS Include="-DHAVE_LARGE_FILE_SUPPORT=1" />
<_MonoCXXFLAGS Include="-Werror=partial-availability" />
<_MonoCXXFLAGS Condition="'$(TargetstvOS)' == 'true'" Include="-fno-gnu-inline-asm" />
<_MonoCXXFLAGS Include="-fexceptions" />
</ItemGroup>
<!-- Android specific options -->
<PropertyGroup Condition="'$(TargetsAndroid)' == 'true'">
<_MonoRunInitCompiler>false</_MonoRunInitCompiler>
</PropertyGroup>
<ItemGroup Condition="'$(TargetsAndroid)' == 'true'">
<_MonoCMakeArgs Include="-DCMAKE_TOOLCHAIN_FILE=$(ANDROID_NDK_ROOT)/build/cmake/android.toolchain.cmake"/>
<_MonoCMakeArgs Include="-DANDROID_NDK=$(ANDROID_NDK_ROOT)"/>
<_MonoCMakeArgs Include="-DANDROID_STL=none"/>
<_MonoCMakeArgs Include="-DANDROID_CPP_FEATURES="no-rtti no-exceptions""/>
<_MonoCMakeArgs Include="-DANDROID_NATIVE_API_LEVEL=$(AndroidApiVersion)"/>
<_MonoCMakeArgs Include="-DANDROID_PLATFORM=android-$(AndroidApiVersion)"/>
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm64'" Include="-DANDROID_ABI=arm64-v8a" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'arm'" Include="-DANDROID_ABI=armeabi-v7a" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'x86'" Include="-DANDROID_ABI=x86" />
<_MonoCMakeArgs Condition="'$(Platform)' == 'x64'" Include="-DANDROID_ABI=x86_64" />
<_MonoCMakeArgs Include="-DENABLE_MINIMAL=ssa,logging" />
<_MonoCMakeArgs Include="-DENABLE_SIGALTSTACK=1"/>
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-march=armv7-a" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-mtune=cortex-a8" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfpu=vfp" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfloat-abi=softfp" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-fpic" />
<_MonoCFLAGS Include="-fstack-protector" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64'" Include="-DANDROID64" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'x64'" Include="-DL_cuserid=9" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-D__POSIX_VISIBLE=201002" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DSK_RELEASE" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DNDEBUG" />
<_MonoCFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-UDEBUG" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-march=armv7-a" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-mtune=cortex-a8" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfpu=vfp" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm'" Include="-mfloat-abi=softfp" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-fpic" />
<_MonoCXXFLAGS Include="-fstack-protector" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64'" Include="-DANDROID64" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'x64'" Include="-DL_cuserid=9" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-D__POSIX_VISIBLE=201002" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DSK_RELEASE" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-DNDEBUG" />
<_MonoCXXFLAGS Condition="'$(Platform)' == 'arm64' or '$(Platform)' == 'arm'" Include="-UDEBUG" />
</ItemGroup>
<!-- Linux options -->
<ItemGroup Condition="'$(TargetsLinux)' == true">
<_MonoCFLAGS Include="-Wl,--build-id=sha1" />
<_MonoCXXFLAGS Include="-Wl,--build-id=sha1" />
</ItemGroup>
<ItemGroup Condition="'$(RealTargetOS)' == 'Linux'">
<_MonoAOTCFLAGS Include="-Wl,--build-id=sha1" />
<_MonoAOTCXXFLAGS Include="-Wl,--build-id=sha1" />
</ItemGroup>
<!-- Devloop features -->
<ItemGroup Condition="'$(MonoMsCorDbi)' == 'true'">
<_MonoCMakeArgs Include="-DENABLE_MSCORDBI=1" />
</ItemGroup>
<ItemGroup Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true'">
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_PAL_TCP=1"/>
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_DISABLE_DEFAULT_LISTEN_PORT=1"/>
<_MonoCMakeArgs Include="-DDISABLE_LINK_STATIC_COMPONENTS=1" Condition="!('$(TargetsiOSSimulator)' == 'true' or '$(TargetstvOSSimulator)' == 'true')"/>
</ItemGroup>
<ItemGroup Condition="'$(TargetsAndroid)' == 'true'">
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_PAL_TCP=1"/>
<_MonoCMakeArgs Include="-DFEATURE_PERFTRACING_DISABLE_DEFAULT_LISTEN_PORT=1"/>
</ItemGroup>
<!-- Components -->
<ItemGroup Condition="'$(MonoComponentsStatic)' == 'true'">
<_MonoCMakeArgs Include="-DSTATIC_COMPONENTS=1" />
</ItemGroup>
<ItemGroup>
<_MonoCMakeArgs Include="-DMONO_COMPONENTS_RID=$(TargetOS)-$(TargetArchitecture)" />
</ItemGroup>
<PropertyGroup>
<_MonoCFLAGSOption>-DCMAKE_C_FLAGS="@(_MonoCPPFLAGS, ' ') @(_MonoCFLAGS, ' ')"</_MonoCFLAGSOption>
<_MonoCXXFLAGSOption>-DCMAKE_CXX_FLAGS="@(_MonoCPPFLAGS, ' ') @(_MonoCXXFLAGS, ' ')"</_MonoCXXFLAGSOption>
</PropertyGroup>
<ItemGroup>
<_MonoCMakeArgs Include="$(_MonoCFLAGSOption)"/>
<_MonoCMakeArgs Include="$(_MonoCXXFLAGSOption)"/>
</ItemGroup>
<PropertyGroup>
<EMSDK_PATH>$([MSBuild]::EnsureTrailingSlash('$(EMSDK_PATH)'))</EMSDK_PATH>
<_MonoCMakeConfigureCommand>cmake @(_MonoCMakeArgs, ' ') $(MonoCMakeExtraArgs) "$(MonoProjectRoot.TrimEnd('\/'))"</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' != 'true' and '$(_MonoRunInitCompiler)' != 'false' and '$(HostOS)' != 'windows'">bash -c 'source $(RepositoryEngineeringCommonDir)native/init-compiler.sh "$(RepositoryEngineeringCommonDir)native" "$(_CompilerTargetArch)" "$(MonoCCompiler)" && @(_MonoBuildEnv, ' ') $(_MonoCMakeConfigureCommand)'</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' != 'true' and '$(_MonoRunInitCompiler)' != 'false' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjDir)" && @(_MonoBuildEnv, ' ') $(_MonoCMakeConfigureCommand)</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' != 'true' and '$(_MonoRunInitCompiler)' == 'false'">$(_MonoCCOption) $(_MonoCXXOption) @(_MonoBuildEnv, ' ') $(_MonoCMakeConfigureCommand)</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' == 'true' and '$(HostOS)' != 'windows'">bash -c 'source $(EMSDK_PATH)/emsdk_env.sh 2>&1 && emcmake $(_MonoCMakeConfigureCommand)'</_MonoCMakeConfigureCommand>
<_MonoCMakeConfigureCommand Condition="'$(TargetsBrowser)' == 'true' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && emcmake $(_MonoCMakeConfigureCommand)</_MonoCMakeConfigureCommand>
<_MonoCMakeBuildCommand>cmake --build . --target install --config $(Configuration)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(MonoVerboseBuild)' == 'true'">$(_MonoCMakeBuildCommand) --verbose</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(_MonoUseNinja)' != 'true'">$(_MonoCMakeBuildCommand) --parallel $([System.Environment]::ProcessorCount)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(TargetsBrowser)' != 'true' and '$(HostOS)' != 'windows'">@(_MonoBuildEnv, ' ') $(_MonoCMakeBuildCommand)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(TargetsBrowser)' != 'true' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjDir)" && @(_MonoBuildEnv, ' ') $(_MonoCMakeBuildCommand)</_MonoCMakeBuildCommand>
<_MonoCMakeBuildCommand Condition="'$(TargetsBrowser)' == 'true' and '$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && $(_MonoCMakeBuildCommand)</_MonoCMakeBuildCommand>
</PropertyGroup>
<MakeDir Directories="$(MonoObjDir)" />
<!-- configure -->
<PropertyGroup>
<_MonoCMakeCmdLineUpToDate Condition="Exists('$(MonoObjDir)cmake_cmd_line.txt') and '$([System.IO.File]::ReadAllText($(MonoObjDir)cmake_cmd_line.txt).Trim())' == '$(_MonoCMakeConfigureCommand.Trim())'">true</_MonoCMakeCmdLineUpToDate>
<_MonoSkipCMakeConfigure>false</_MonoSkipCMakeConfigure>
<_MonoSkipCMakeConfigure Condition="'$(SkipMonoCrossJitConfigure)' == 'true' or '$(_MonoCMakeCmdLineUpToDate)' == 'true'">true</_MonoSkipCMakeConfigure>
</PropertyGroup>
<Message Condition="'$(_MonoSkipCMakeConfigure)' == 'true'" Text="The CMake command line is the same as the last run. Skipping running CMake configure." Importance="High"/>
<Message Condition="'$(_MonoSkipCMakeConfigure)' != 'true'" Text="Running '$(_MonoCMakeConfigureCommand)' in '$(MonoObjDir)'" Importance="High"/>
<Exec Condition="'$(_MonoSkipCMakeConfigure)' != 'true'" Command="$(_MonoCMakeConfigureCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<WriteLinesToFile
Condition="'$(_MonoSkipCMakeConfigure)' != 'true'"
File="$(MonoObjDir)cmake_cmd_line.txt"
Lines="$(_MonoCMakeConfigureCommand)"
Overwrite="true" />
<!-- build -->
<Message Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Text="Running '$(_MonoCMakeBuildCommand)' in '$(MonoObjDir)'" Importance="High"/>
<Exec Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Command="$(_MonoCMakeBuildCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<!-- strip -->
<PropertyGroup>
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('Linux'))">linux-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('OSX'))">darwin-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="'$(HostOS)' == 'windows'">windows-x86_64</MonoToolchainPrebuiltOS>
<_MonoRuntimeFilePath>$(MonoObjDir)out\lib\$(MonoFileName)</_MonoRuntimeFilePath>
<_LinuxAbi Condition="'$(TargetsAndroid)' != 'true'">gnu</_LinuxAbi>
<_LinuxAbi Condition="'$(TargetsAndroid)' == 'true'">android</_LinuxAbi>
<_LinuxFloatAbi Condition="'$(TargetsAndroid)' != 'true'">hf</_LinuxFloatAbi>
<_Objcopy>objcopy</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'arm'">arm-linux-$(_LinuxAbi)eabi$(_LinuxFloatAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'armv6'">arm-linux-$(_LinuxAbi)eabi$(_LinuxFloatAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'arm64'">aarch64-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 's390x'">s390x-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'x64'">x86_64-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(Platform)' == 'x86'">i686-linux-$(_LinuxAbi)-$(_Objcopy)</_Objcopy>
<_Objcopy Condition="'$(TargetsAndroid)' == 'true'">$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/bin/llvm-objcopy</_Objcopy>
</PropertyGroup>
<!-- test viability of objcopy command -->
<Exec Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) -V" IgnoreStandardErrorWarningFormat="true" ContinueOnError="WarnAndContinue" IgnoreExitCode="true" EchoOff="true" ConsoleToMsBuild="true">
<Output TaskParameter="ExitCode" PropertyName="_ObjcopyFound"/>
</Exec>
<PropertyGroup>
<!-- if all else fails in finding a valid objcopy, fall back to no-prefix from $PATH (used for x64 on CentOS) -->
<_Objcopy Condition="'$(_ObjcopyFound)' != '0'">objcopy</_Objcopy>
</PropertyGroup>
<ItemGroup>
<FilesToStrip Include="$(_MonoRuntimeFilePath)" />
<FilesToStrip Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix)" />
<FilesToStrip Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\Mono*framework\**\Mono*" Exclude="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\Mono*framework\**\*.dwarf" />
</ItemGroup>
<Message Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ($([MSBuild]::IsOSPlatform('OSX')) or $([MSBuild]::IsOSPlatform('Linux')))" Text="Stripping debug symbols from %(FilesToStrip.Identity)" Importance="High"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true')" Command="dsymutil --flat --minimize %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true') and '$(Configuration)' == 'Release'" Command="strip -no_code_signature_warning -S %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) --only-keep-debug %(FilesToStrip.Identity) %(FilesToStrip.Identity).dbg" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) --strip-unneeded %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
<Exec Condition="!$([System.String]::Copy(%(FilesToStrip.Identity)).EndsWith('.a')) and '$(BuildMonoAOTCrossCompilerOnly)' != 'true' and ('$(TargetsLinux)' == 'true' or '$(TargetsAndroid)' == 'true')" Command="$(_Objcopy) --add-gnu-debuglink=%(FilesToStrip.Identity).dbg %(FilesToStrip.Identity)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjDir)"/>
</Target>
<!-- Build AOT cross compiler (if available) -->
<Target Name="BuildMonoCross" Condition="'$(BuildMonoAOTCrossCompiler)' == 'true'" DependsOnTargets="BuildMonoRuntime">
<!-- iOS/tvOS specific options -->
<PropertyGroup Condition="'$(TargetstvOS)' == 'true' or '$(TargetsiOS)' == 'true'">
<!-- FIXME: Disable for simulator -->
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' != 'true'">$(XcodeDir)/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS$(iOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(TargetsiOS)' == 'true' and '$(TargetsiOSSimulator)' == 'true'">$(XcodeDir)/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator$(iOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' != 'true'">$(XcodeDir)/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS$(tvOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(TargetstvOS)' == 'true' and '$(TargetstvOSSimulator)' == 'true'">$(XcodeDir)/Platforms/AppleTVSimulator.platform/Developer/SDKs/AppleTVSimulator$(tvOSVersion).sdk</MonoAotCMakeSysroot>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-$(Platform)-darwin.h</MonoAotOffsetsFile>
<MonoAotAbi Condition="'$(Platform)' == 'arm64'">aarch64-apple-darwin10</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'arm'">arm-apple-darwin10</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x86'">i386-apple-darwin10</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x64'">x86_64-apple-darwin10</MonoAotAbi>
</PropertyGroup>
<!-- Catalyst specific options -->
<PropertyGroup Condition="'$(TargetsMacCatalyst)' == 'true'">
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotCMakeSysroot Condition="'$(TargetsMacCatalyst)' == 'true'">$(XcodeDir)/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk</MonoAotCMakeSysroot>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-$(Platform)-darwin.h</MonoAotOffsetsFile>
<MonoAotAbi Condition="'$(Platform)' == 'arm64'">aarch64-apple-maccatalyst</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x64'">x86_64-apple-maccatalyst</MonoAotAbi>
</PropertyGroup>
<!-- Linux specific options -->
<ItemGroup Condition="'$(RealTargetOS)' == 'Linux' or $([MSBuild]::IsOSPlatform('Linux'))">
<_LibClang Include="$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/lib64/libclang.so.*"/>
</ItemGroup>
<PropertyGroup Condition="'$(TargetsLinux)' == 'true' and '$(Platform)' == 'arm64'">
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotAbi>aarch64-linux-gnu</MonoAotAbi>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-aarch-linux-gnu.h</MonoAotOffsetsFile>
<MonoAotOffsetsPrefix>$(MonoCrossDir)/usr/lib/gcc/aarch64-linux-gnu/5</MonoAotOffsetsPrefix>
</PropertyGroup>
<!-- macOS host specific options -->
<ItemGroup Condition="'$(RealTargetOS)' == 'OSX' or $([MSBuild]::IsOSPlatform('OSX'))">
<MonoAOTCMakeArgs Include="-DCMAKE_OSX_DEPLOYMENT_TARGET=$(macOSVersionMin)" />
</ItemGroup>
<!-- WASM specific options -->
<PropertyGroup Condition="'$(TargetsBrowser)' == 'true'">
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotAbi>wasm32-unknown-none</MonoAotAbi>
<MonoAotOffsetsFile>$(MonoObjCrossDir)offsets-wasm32-unknown-none.h</MonoAotOffsetsFile>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('OSX'))">$(MonoAOTLLVMDir)/lib/libclang.dylib</MonoLibClang>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('Linux'))">$(MonoAOTLLVMDir)/lib/libclang.so</MonoLibClang>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('Windows'))">$([MSBuild]::NormalizePath('$(MonoAOTLLVMDir)', 'bin', 'libclang.dll'))</MonoLibClang>
<PythonCmd Condition="'$(HostOS)' == 'windows'">setlocal EnableDelayedExpansion && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && !EMSDK_PYTHON!</PythonCmd>
<_ForceRelease Condition="$([MSBuild]::IsOSPlatform('Windows')) and '$(TargetArchitecture)' == 'wasm' and '$(Configuration)' == 'Debug'">true</_ForceRelease>
</PropertyGroup>
<!-- Windows specific options -->
<ItemGroup Condition="'$(RealTargetOS)' == 'Windows' or $([MSBuild]::IsOSPlatform('Windows'))">
<_MonoAOTCPPFLAGS Include="-DHOST_WIN32" />
<_MonoAOTCPPFLAGS Include="-D__WIN32__" />
<_MonoAOTCPPFLAGS Include="-DWIN32" />
<_MonoAOTCPPFLAGS Include="-DWIN32_LEAN_AND_MEAN" />
<!--<_MonoAOTCPPFLAGS Include="-D_WINDOWS" />--> <!-- set in monow.vcxproj, not sure we really need it -->
<_MonoAOTCPPFLAGS Condition="'$(Platform)' == 'x64' or '$(Platform)' == 'arm64'" Include="-DWIN64" />
<_MonoAOTCPPFLAGS Condition="'$(Configuration)' == 'Release' or '$(_ForceRelease)' == 'true'" Include="-DNDEBUG" />
<_MonoAOTCPPFLAGS Condition="'$(Configuration)' == 'Debug' and '$(_ForceRelease)' != 'true'" Include="-D_DEBUG" />
<!-- <_MonoAOTCPPFLAGS Include="-D__default_codegen__" /> --> <!-- doesn't seem to be used -->
<_MonoAOTCPPFLAGS Include="-D_CRT_SECURE_NO_WARNINGS" />
<_MonoAOTCPPFLAGS Include="-D_CRT_NONSTDC_NO_DEPRECATE" />
<!--<_MonoAOTCPPFLAGS Include="-DGC_NOT_DLL" />--> <!-- only used for Boehm -->
<_MonoAOTCPPFLAGS Include="-DWIN32_THREADS" />
<_MonoAOTCPPFLAGS Include="-DWINVER=0x0601" />
<_MonoAOTCPPFLAGS Include="-D_WIN32_WINNT=0x0601" />
<_MonoAOTCPPFLAGS Include="-D_WIN32_IE=0x0501" />
<_MonoAOTCPPFLAGS Include="-D_UNICODE" />
<_MonoAOTCPPFLAGS Include="-DUNICODE" />
<_MonoAOTCPPFLAGS Include="-DFD_SETSIZE=1024" />
<_MonoAOTCPPFLAGS Include="-DNVALGRIND" />
<MonoAOTCMakeArgs Include="-DDISABLE_INTERPRETER=1" />
<!-- Select generator platform for VS generator -->
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x64'" Include="-A x64" />
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'x86'" Include="-A Win32" />
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm'" Include="-A ARM" />
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' != 'true' and '$(Platform)' == 'arm64'" Include="-A ARM64" />
</ItemGroup>
<!-- Android specific options -->
<PropertyGroup Condition="'$(TargetsAndroid)' == 'true'">
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('Linux'))">linux-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="$([MSBuild]::IsOSPlatform('OSX'))">darwin-x86_64</MonoToolchainPrebuiltOS>
<MonoToolchainPrebuiltOS Condition="'$(HostOS)' == 'windows'">windows-x86_64</MonoToolchainPrebuiltOS>
<MonoUseCrossTool>true</MonoUseCrossTool>
<MonoAotCMakeSysroot Condition="Exists('$(ANDROID_NDK_ROOT)/sysroot')">$(ANDROID_NDK_ROOT)/sysroot</MonoAotCMakeSysroot>
<MonoAotCMakeSysroot Condition="'$(MonoAotCMakeSysroot)' == '' And Exists('$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/sysroot')">$(ANDROID_NDK_ROOT)/toolchains/llvm/prebuilt/$(MonoToolchainPrebuiltOS)/sysroot</MonoAotCMakeSysroot>
<MonoAotAbi Condition="'$(Platform)' == 'arm64'">aarch64-v8a-linux-android</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'arm'">armv7-none-linux-androideabi</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x86'">i686-none-linux-android</MonoAotAbi>
<MonoAotAbi Condition="'$(Platform)' == 'x64'">x86_64-none-linux-android</MonoAotAbi>
<MonoAotOffsetsFile>$(MonoObjDir)cross/offsets-$(Platform)-android.h</MonoAotOffsetsFile>
</PropertyGroup>
<PropertyGroup>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('OSX')) and '$(MonoLibClang)' == ''">$(XcodeDir)/Toolchains/XcodeDefault.xctoolchain/usr/lib/libclang.dylib</MonoLibClang>
<MonoLibClang Condition="$([MSBuild]::IsOSPlatform('Linux')) and '$(MonoLibClang)' == ''">@(_LibClang)</MonoLibClang>
<MonoLibClang Condition="'$(HostOS)' == 'windows' and '$(MonoLibClang)' == ''">c:/dev/LLVM/bin/libclang.dll</MonoLibClang>
<MonoAotCMakeSysroot Condition="'$(MonoAotCMakeSysroot)' == ''">$(MonoCrossDir)</MonoAotCMakeSysroot>
</PropertyGroup>
<ItemGroup Condition="'$(MonoUseCrossTool)' == 'true'">
<MonoAotCrossOffsetsToolParams Include="--abi=$(MonoAotAbi)" />
<MonoAotCrossOffsetsToolParams Include="--netcore" />
<MonoAotCrossOffsetsToolParams Include="--targetdir="$(MonoObjDir.TrimEnd('\/'))"" />
<MonoAotCrossOffsetsToolParams Include="--monodir="$(MonoProjectRoot.TrimEnd('\/'))"" />
<MonoAotCrossOffsetsToolParams Include="--nativedir="$(SharedNativeRoot.TrimEnd('\/'))"" />
<MonoAotCrossOffsetsToolParams Include="--outfile="$(MonoAotOffsetsFile)"" />
<MonoAotCrossOffsetsToolParams Include="--libclang="$(MonoLibClang)"" />
<MonoAotCrossOffsetsToolParams Condition="'$(MonoAotOffsetsPrefix)' != ''" Include="--prefix="$(MonoAotOffsetsPrefix)"" />
<MonoAotCrossOffsetsToolParams Condition="'$(MonoAotCMakeSysroot)' != ''" Include="--sysroot="$(MonoAotCMakeSysroot)"" />
<MonoAotCrossOffsetsToolParams Condition="'$(TargetsBrowser)' == 'true'" Include="--emscripten-sdk="$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'upstream', 'emscripten'))"" />
</ItemGroup>
<PropertyGroup>
<_MonoAOTCFLAGSOption>-DCMAKE_C_FLAGS="@(_MonoAOTCPPFLAGS, ' ') @(_MonoAOTCFLAGS, ' ')"</_MonoAOTCFLAGSOption>
<_MonoAOTCXXFLAGSOption>-DCMAKE_CXX_FLAGS="@(_MonoAOTCPPFLAGS, ' ') @(_MonoAOTCXXFLAGS, ' ')"</_MonoAOTCXXFLAGSOption>
</PropertyGroup>
<ItemGroup>
<MonoAOTCMakeArgs Include="-DAOT_TARGET_TRIPLE=$(MonoAotAbi)"/>
<MonoAOTCMakeArgs Condition="'$(_MonoUseNinja)' == 'true'" Include="-G Ninja"/>
<MonoAOTCMakeArgs Include="-DCMAKE_INSTALL_PREFIX=$([MSBuild]::NormalizePath('$(MonoObjCrossDir)', 'out'))"/>
<MonoAOTCMakeArgs Condition="'$(_ForceRelease)' != 'true'" Include="-DCMAKE_BUILD_TYPE=$(Configuration)"/>
<MonoAOTCMakeArgs Condition="'$(_ForceRelease)' == 'true'" Include="-DCMAKE_BUILD_TYPE=Release"/>
<!-- FIXME: Disable more -->
<MonoAOTCMakeArgs Include="-DENABLE_MINIMAL=" />
<MonoAOTCMakeArgs Include="-DENABLE_ICALL_SYMBOL_MAP=1" />
<MonoAOTCMakeArgs Include="-DDISABLE_SHARED_LIBS=1" />
<MonoAOTCMakeArgs Include="-DDISABLE_LIBS=1" />
<MonoAOTCMakeArgs Include="-DDISABLE_COMPONENTS=1" />
<MonoAOTCMakeArgs Condition="'$(MonoAotOffsetsFile)' != ''" Include="-DAOT_OFFSETS_FILE="$(MonoAotOffsetsFile)"" />
<MonoAOTCMakeArgs Condition="'$(MonoAOTEnableLLVM)' == 'true'" Include="-DLLVM_PREFIX=$(MonoAOTLLVMDir.TrimEnd('\/'))" />
<MonoAOTCMakeArgs Include="$(_MonoAOTCFLAGSOption)" />
<MonoAOTCMakeArgs Include="$(_MonoAOTCXXFLAGSOption)" />
<!-- thread suspend -->
<MonoAOTCMakeArgs Include="-DGC_SUSPEND=$(MonoThreadSuspend)" />
<!-- rename exe -->
<MonoAOTCMakeArgs Include="-DMONO_CROSS_COMPILE_EXECUTABLE_NAME=1" />
</ItemGroup>
<PropertyGroup>
<_MonoAotCrossOffsetsCommand Condition="'$(MonoUseCrossTool)' == 'true'">$(PythonCmd) $(MonoProjectRoot)mono/tools/offsets-tool/offsets-tool.py @(MonoAotCrossOffsetsToolParams, ' ')</_MonoAotCrossOffsetsCommand>
<_MonoAotCMakeConfigureCommand>cmake @(MonoAOTCMakeArgs, ' ') $(MonoProjectRoot)</_MonoAotCMakeConfigureCommand>
<_MonoAotCMakeConfigureCommand Condition="'$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjCrossDir)" && @(_MonoBuildEnv, ' ') $(_MonoAotCMakeConfigureCommand)</_MonoAotCMakeConfigureCommand>
<_MonoAotCMakeBuildCommand>cmake --build . --target install --config $(Configuration)</_MonoAotCMakeBuildCommand>
<_MonoAotCMakeBuildCommand Condition="'$(MonoVerboseBuild)' == 'true'">$(_MonoAotCMakeBuildCommand) --verbose</_MonoAotCMakeBuildCommand>
<_MonoAotCMakeBuildCommand Condition="'$(_MonoUseNinja)' != 'true'">$(_MonoAotCMakeBuildCommand) --parallel $([System.Environment]::ProcessorCount)</_MonoAotCMakeBuildCommand>
<_MonoAotCMakeBuildCommand Condition="'$(HostOS)' == 'windows'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" $(_CompilerTargetArch) && cd /D "$(MonoObjCrossDir)" && @(_MonoBuildEnv, ' ') $(_MonoAotCMakeBuildCommand)</_MonoAotCMakeBuildCommand>
<_MonoAotPrebuiltOffsetsFile>$(ArtifactsObjDir)\mono\offsetfiles\$(PlatformConfigPathPart)\cross\$([System.IO.Path]::GetFileName('$(MonoAotOffsetsFile)'))</_MonoAotPrebuiltOffsetsFile>
</PropertyGroup>
<MakeDir Directories="$(MonoObjCrossDir)" />
<!-- offsets tool -->
<Message Condition="Exists('$(_MonoAotPrebuiltOffsetsFile)')" Text="Out-of-tree offset file found, moving into place" Importance="High" />
<Copy Condition="Exists('$(_MonoAotPrebuiltOffsetsFile)')" SourceFiles="$(_MonoAotPrebuiltOffsetsFile)" DestinationFolder="$([System.IO.Path]::GetDirectoryName('$(MonoAotOffsetsFile)'))" />
<Message Condition="'$(MonoUseCrossTool)' == 'true' and !Exists('$(MonoAotOffsetsFile)')" Text="Running '$(_MonoAotCrossOffsetsCommand)'" Importance="High" />
<Exec Condition="'$(MonoUseCrossTool)' == 'true' and !Exists('$(MonoAotOffsetsFile)')" Command="$(_MonoAotCrossOffsetsCommand)" IgnoreStandardErrorWarningFormat="true" />
<!-- configure -->
<PropertyGroup>
<_MonoAotCMakeCmdLineUpToDate Condition="Exists('$(MonoObjCrossDir)cmake_cmd_line.txt') and '$([System.IO.File]::ReadAllText($(MonoObjCrossDir)cmake_cmd_line.txt).Trim())' == '$(_MonoAotCMakeConfigureCommand.Trim())'">true</_MonoAotCMakeCmdLineUpToDate>
<_MonoSkipAotCMakeConfigure>false</_MonoSkipAotCMakeConfigure>
<_MonoSkipAotCMakeConfigure Condition="'$(MonoGenerateOffsetsOSGroups)' != '' or '$(_MonoAotCMakeCmdLineUpToDate)' == 'true'">true</_MonoSkipAotCMakeConfigure>
</PropertyGroup>
<Message Condition="'$(_MonoSkipAotCMakeConfigure)' == 'true'" Text="The AOT Cross CMake command line is the same as the last run. Skipping running CMake configure." Importance="High"/>
<Message Condition="'$(_MonoSkipAotCMakeConfigure)' != 'true'" Text="Running '$(_MonoAotCMakeConfigureCommand)' in '$(MonoObjCrossDir)'" Importance="High"/>
<Exec Condition="'$(_MonoSkipAotCMakeConfigure)' != 'true'" Command="$(_MonoAotCMakeConfigureCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjCrossDir)"/>
<WriteLinesToFile
Condition="'$(_MonoSkipAotCMakeConfigure)' != 'true'"
File="$(MonoObjCrossDir)cmake_cmd_line.txt"
Lines="$(_MonoAotCMakeConfigureCommand)"
Overwrite="true" />
<!-- build -->
<Message Text="Running '$(_MonoAotCMakeBuildCommand)' in '$(MonoObjCrossDir)'" Importance="High" />
<Exec Condition="'$(MonoGenerateOffsetsOSGroups)' == ''" Command="$(_MonoAotCMakeBuildCommand)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoObjCrossDir)"/>
</Target>
<PropertyGroup>
<!-- Hardcode version paths in a global location. Condition on running OS to generate the right files for the Mono WASM cross tools. -->
<NativeVersionFile Condition="'$(HostOS)' == 'windows'">$(ArtifactsObjDir)_version.h</NativeVersionFile>
<NativeVersionFile Condition="'$(HostOS)' != 'windows'">$(ArtifactsObjDir)_version.c</NativeVersionFile>
<AssemblyName>.NET Runtime</AssemblyName>
</PropertyGroup>
<Import Project="$(RepositoryEngineeringDir)versioning.targets" />
<!-- The standard set of targets that need to run before the BuildMono target runs -->
<PropertyGroup>
<MonoDependsOnTargets>CheckEnv;GetXcodeDir;GenerateRuntimeVersionFile;BuildMonoRuntime;BuildMonoCross</MonoDependsOnTargets>
<MonoDependsOnTargets Condition="'$(TargetsBrowser)' == 'true'">GenerateRuntimeVersionFile;ProvisionEmscripten;$(MonoDependsOnTargets)</MonoDependsOnTargets>
</PropertyGroup>
<!-- General targets -->
<Target Name="BuildMono" AfterTargets="Build" DependsOnTargets="$(MonoDependsOnTargets)">
<PropertyGroup Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'">
<_MonoRuntimeFilePath Condition="'$(TargetsWindows)' == 'true'">$(MonoObjDir)out\bin\$(MonoFileName)</_MonoRuntimeFilePath>
<_MonoRuntimeFilePath Condition="'$(_MonoRuntimeFilePath)' == ''">$(MonoObjDir)out\lib\$(MonoFileName)</_MonoRuntimeFilePath>
<_MonoRuntimeStaticFilePath Condition="'$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsAndroid)' == 'true'">$(MonoObjDir)out\lib\$(MonoStaticLibFileName)</_MonoRuntimeStaticFilePath>
<_MonoIncludeInterpStaticFiles Condition="'$(TargetsBrowser)' == 'true'">true</_MonoIncludeInterpStaticFiles>
<_MonoIncludeIcuFiles Condition="'$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsMacCatalyst)' == 'true'">true</_MonoIncludeIcuFiles>
</PropertyGroup>
<PropertyGroup Condition="'$(BuildMonoAOTCrossCompiler)' == 'true'">
<_MonoAotCrossFilePath>$(MonoObjCrossDir)out\bin\$(MonoAotCrossFileName)</_MonoAotCrossFilePath>
<_MonoAotCrossPdbFilePath>$(MonoObjCrossDir)out\bin\$(MonoAotCrossPdbFileName)</_MonoAotCrossPdbFilePath>
</PropertyGroup>
<!-- Copy Mono runtime files to artifacts directory -->
<ItemGroup>
<_MonoRuntimeComponentsStaticFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(StaticLibSuffix)" Condition="Exists($(_MonoRuntimeFilePath))" />
<_MonoRuntimeComponentsSharedFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix)" Condition="Exists($(_MonoRuntimeFilePath))" />
<_MonoRuntimeComponentsSharedFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix).dwarf" Condition="Exists('$(_MonoRuntimeFilePath).dwarf')" />
<_MonoRuntimeComponentsSharedFilePath Include="$([System.IO.Directory]::GetParent($(_MonoRuntimeFilePath)))\libmono-component-*$(LibSuffix).dbg" Condition="Exists('$(_MonoRuntimeFilePath).dbg')" />
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeFilePath)" Condition="Exists($(_MonoRuntimeFilePath))">
<Destination>$(RuntimeBinDir)$(MonoFileName)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeFilePath).dbg" Condition="Exists('$(_MonoRuntimeFilePath).dbg')">
<Destination>$(RuntimeBinDir)$(MonoFileName).dbg</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeFilePath).dwarf" Condition="Exists('$(_MonoRuntimeFilePath).dwarf')">
<Destination>$(RuntimeBinDir)$(MonoFileName).dwarf</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoRuntimeStaticFilePath)" Condition="Exists($(_MonoRuntimeStaticFilePath)) and '$(_MonoRuntimeStaticFilePath)' != '$(_MonoRuntimeFilePath)'">
<Destination>$(RuntimeBinDir)$(MonoStaticLibFileName)</Destination>
</_MonoRuntimeArtifacts>
<!-- copy the mono runtime component shared or static libraries -->
<_MonoRuntimeArtifacts Include="@(_MonoRuntimeComponentsStaticFilePath)">
<Destination>$(RuntimeBinDir)%(_MonoRuntimeComponentsStaticFilePath.Filename)%(_MonoRuntimeComponentsStaticFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="@(_MonoRuntimeComponentsSharedFilePath)">
<Destination>$(RuntimeBinDir)%(_MonoRuntimeComponentsSharedFilePath.Filename)%(_MonoRuntimeComponentsSharedFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoAotCrossFilePath)">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\$(MonoAotCrossFileName)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Include="$(_MonoAotCrossPdbFilePath)" Condition="Exists('$(_MonoAotCrossPdbFilePath)')">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\$(MonoAotCrossPdbFileName)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoBundleLLVMOptimizer)' == 'true'" Include="$(MonoLLVMDir)\bin\llc$(ExeSuffix)">
<Destination>$(RuntimeBinDir)\llc$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoBundleLLVMOptimizer)' == 'true'" Include="$(MonoLLVMDir)\bin\opt$(ExeSuffix)">
<Destination>$(RuntimeBinDir)\opt$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoAOTBundleLLVMOptimizer)' == 'true'" Include="$(MonoAOTLLVMDir)\bin\llc$(ExeSuffix)">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\llc$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoAOTBundleLLVMOptimizer)' == 'true'" Include="$(MonoAOTLLVMDir)\bin\opt$(ExeSuffix)">
<Destination>$(RuntimeBinDir)cross\$(PackageRID)\opt$(ExeSuffix)</Destination>
</_MonoRuntimeArtifacts>
<_MonoIncludeArtifacts Include="$(MonoObjDir)out\include\**" />
<_MonoRuntimeArtifacts Condition="'$(MonoComponentsStatic)' != 'true' and Exists('$(MonoObjDir)out\lib\Mono.release.framework')" Include="@(_MonoRuntimeComponentsSharedFilePath)">
<Destination>$(RuntimeBinDir)\Mono.release.framework\%(_MonoRuntimeComponentsSharedFilePath.Filename)%(_MonoRuntimeComponentsSharedFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and !Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Mono.release">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Versions\Current\Mono.release">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Mono.release.dwarf">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Mono.dwarf</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(MonoComponentsStatic)' != 'true' and Exists('$(MonoObjDir)out\lib\Mono.debug.framework')" Include="@(_MonoRuntimeComponentsSharedFilePath)">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\%(_MonoRuntimeComponentsSharedFilePath.Filename)%(_MonoRuntimeComponentsSharedFilePath.Extension)</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and !Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Mono.debug">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Versions\Current\Mono.debug">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Mono</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Mono.debug.dwarf">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Mono.dwarf</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and !Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.release.framework') and Exists('$(MonoObjDir)out\lib\Mono.release.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.release.framework\Versions\Current\Resources\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.release.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and !Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="Exists('$(MonoObjDir)out\lib\Mono.debug.framework') and Exists('$(MonoObjDir)out\lib\Mono.debug.framework\Versions')" Include="$(MonoObjDir)out\lib\Mono.debug.framework\Versions\Current\Resources\Info.plist">
<Destination>$(RuntimeBinDir)\Mono.debug.framework\Info.plist</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeBuildArtifacts Include="$(MonoObjDir)\build\**" />
<_MonoRuntimeArtifacts Condition="'$(_MonoIncludeInterpStaticFiles)' == 'true'" Include="$(MonoObjDir)out\lib\libmono-ee-interp.a">
<Destination>$(RuntimeBinDir)libmono-ee-interp.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(_MonoIncludeInterpStaticFiles)' == 'true'" Include="$(MonoObjDir)out\lib\libmono-icall-table.a">
<Destination>$(RuntimeBinDir)libmono-icall-table.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(_MonoIncludeInterpStaticFiles)' == 'true'" Include="$(MonoObjDir)out\lib\libmono-ilgen.a">
<Destination>$(RuntimeBinDir)libmono-ilgen.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoRuntimeArtifacts Condition="'$(TargetsBrowser)' == 'true' and '$(BuildMonoAOTCrossCompilerOnly)' != 'true'" Include="$(MonoObjDir)out\lib\libmono-profiler-aot.a">
<Destination>$(RuntimeBinDir)libmono-profiler-aot.a</Destination>
</_MonoRuntimeArtifacts>
<_MonoICorDebugArtifacts Condition="'$(MonoMsCorDbi)' == 'true'" Include="$(MonoObjDir)out\lib\$(LibPrefix)dbgshim$(LibSuffix)">
<Destination>$(RuntimeBinDir)$(LibPrefix)dbgshim$(LibSuffix)</Destination>
</_MonoICorDebugArtifacts>
<_MonoICorDebugArtifacts Condition="'$(MonoMsCorDbi)' == 'true'" Include="$(MonoObjDir)out\lib\$(LibPrefix)mscordbi$(LibSuffix)">
<Destination>$(RuntimeBinDir)$(LibPrefix)mscordbi$(LibSuffix)</Destination>
</_MonoICorDebugArtifacts>
<_IcuArtifacts Condition="'$(_MonoIncludeIcuFiles)' == 'true'"
Include="$(_IcuLibdir)\libicuuc.a;
$(_IcuLibdir)\libicui18n.a;
$(_IcuLibdir)\libicudata.a;
$(_IcuLibdir)\*.dat" />
</ItemGroup>
<Copy Condition="'$(_MonoIncludeIcuFiles)' == 'true'"
SourceFiles="@(_IcuArtifacts)"
DestinationFolder="$(RuntimeBinDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(_MonoRuntimeArtifacts)"
DestinationFiles="%(_MonoRuntimeArtifacts.Destination)"
Condition="'$(MonoGenerateOffsetsOSGroups)' == ''"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(_MonoICorDebugArtifacts)"
DestinationFiles="%(_MonoICorDebugArtifacts.Destination)"
SkipUnchangedFiles="true"
Condition="Exists(@(_MonoICorDebugArtifacts))" />
<Copy SourceFiles="@(_MonoIncludeArtifacts)"
DestinationFiles="@(_MonoIncludeArtifacts->'$(RuntimeBinDir)include\%(RecursiveDir)%(Filename)%(Extension)')"
SkipUnchangedFiles="true"
Condition="'$(MonoGenerateOffsetsOSGroups)' == '' and ('$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true' or '$(TargetsAndroid)' == 'true' or '$(TargetsBrowser)' == 'true')"/>
<Copy SourceFiles="@(_MonoRuntimeBuildArtifacts)"
DestinationFiles="@(_MonoRuntimeBuildArtifacts->'$(RuntimeBinDir)build\%(RecursiveDir)%(Filename)%(Extension)')"
SkipUnchangedFiles="true"
Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true'" />
<Exec Condition="'$(BuildMonoAOTCrossCompilerOnly)' != 'true' and '$(MonoGenerateOffsetsOSGroups)' == '' and ('$(TargetsOSX)' == 'true' or '$(TargetsMacCatalyst)' == 'true' or '$(TargetsiOS)' == 'true' or '$(TargetstvOS)' == 'true')" Command="install_name_tool -id @rpath/$(MonoFileName) $(RuntimeBinDir)$(MonoFileName)" />
</Target>
<Target Name="CleanMono">
<RemoveDir Directories="$(MonoObjDir)" />
</Target>
</Project>
| 1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/mono/sample/wasm/browser-bench/Console/Makefile
|
TOP=../../../../../..
include ../../wasm.mk
ifneq ($(AOT),)
override MSBUILD_ARGS+=/p:RunAOTCompilation=true
endif
PROJECT_NAME=Wasm.Console.Bench.Sample.csproj
CONSOLE_DLL=Wasm.Console.Bench.Sample.dll
MAIN_JS=test-main.js
run: run-console
|
TOP=../../../../../..
include ../../wasm.mk
ifneq ($(AOT),)
override MSBUILD_ARGS+=/p:RunAOTCompilation=true
endif
PROJECT_NAME=Wasm.Console.Bench.Sample.csproj
CONSOLE_DLL=Wasm.Console.Bench.Sample.dll
MAIN_JS=test-main.js
ARGS=--run $(CONSOLE_DLL)
run: run-console
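# Illustrative usage note, not part of the original Makefile: assuming the run-console target in
# ../../wasm.mk forwards $(ARGS) to the generated runner, "make run" (optionally with AOT=1)
# runs $(CONSOLE_DLL) via the --run argument added above.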
| 1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/mono/wasm/wasm.proj
|
<Project Sdk="Microsoft.Build.NoTargets">
<UsingTask TaskName="Microsoft.WebAssembly.Build.Tasks.RunWithEmSdkEnv" AssemblyFile="$(WasmAppBuilderTasksAssemblyPath)" />
<PropertyGroup>
<!-- FIXME: clean up the duplication with libraries Directory.Build.props -->
<PackageRID>browser-wasm</PackageRID>
<NativeBinDir>$([MSBuild]::NormalizeDirectory('$(ArtifactsBinDir)', 'native', '$(NetCoreAppCurrent)-$(TargetOS)-$(Configuration)-$(TargetArchitecture)'))</NativeBinDir>
<ICULibDir>$([MSBuild]::NormalizeDirectory('$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)', 'runtimes', 'browser-wasm', 'native', 'lib'))</ICULibDir>
<WasmEnableES6 Condition="'$(WasmEnableES6)' == ''">false</WasmEnableES6>
<FilterSystemTimeZones Condition="'$(FilterSystemTimeZones)' == ''">false</FilterSystemTimeZones>
<EmccCmd>emcc</EmccCmd>
<WasmObjDir>$(ArtifactsObjDir)wasm</WasmObjDir>
<_EmccDefaultsRspPath>$(NativeBinDir)src\emcc-default.rsp</_EmccDefaultsRspPath>
<_EmccCompileRspPath>$(NativeBinDir)src\emcc-compile.rsp</_EmccCompileRspPath>
<_EmccLinkRspPath>$(NativeBinDir)src\emcc-link.rsp</_EmccLinkRspPath>
<WasmNativeStrip Condition="'$(ContinuousIntegrationBuild)' == 'true'">false</WasmNativeStrip>
</PropertyGroup>
<Target Name="CheckEnv">
<Error Condition="'$(TargetArchitecture)' != 'wasm'" Text="Expected TargetArchitecture==wasm, got '$(TargetArchitecture)'"/>
<Error Condition="'$(TargetOS)' != 'Browser'" Text="Expected TargetOS==Browser, got '$(TargetOS)'"/>
<Error Condition="'$(EMSDK_PATH)' == ''" Text="The EMSDK_PATH environment variable should be set pointing to the emscripten SDK root dir."/>
</Target>
<ItemGroup>
<PackageReference Include="Microsoft.NETCore.Runtime.ICU.Transport" PrivateAssets="all" Version="$(MicrosoftNETCoreRuntimeICUTransportVersion)" GeneratePathProperty="true" />
<PackageReference Include="System.Runtime.TimeZoneData" PrivateAssets="all" Version="$(SystemRuntimeTimeZoneDataVersion)" GeneratePathProperty="true" />
</ItemGroup>
<UsingTask TaskName="PInvokeTableGenerator" AssemblyFile="$(WasmAppBuilderTasksAssemblyPath)"/>
<Target Name="BuildPInvokeTable" DependsOnTargets="CheckEnv;ResolveLibrariesFromLocalBuild">
<PropertyGroup>
<WasmPInvokeTablePath>$(ArtifactsObjDir)wasm\pinvoke-table.h</WasmPInvokeTablePath>
</PropertyGroup>
<ItemGroup>
<WasmPInvokeModule Include="libSystem.Native" />
<WasmPInvokeModule Include="libSystem.IO.Compression.Native" />
<WasmPInvokeModule Include="libSystem.Globalization.Native" />
<WasmPInvokeAssembly Include="@(LibrariesRuntimeFiles)" Condition="'%(Extension)' == '.dll' and '%(IsNative)' != 'true'" />
</ItemGroup>
<!-- Retrieve CoreLib's targetpath via GetTargetPath as it isn't binplaced yet. -->
<MSBuild Projects="$(CoreLibProject)"
Targets="GetTargetPath">
<Output TaskParameter="TargetOutputs" ItemName="WasmPInvokeAssembly" />
</MSBuild>
<MakeDir Directories="$(ArtifactsObjDir)wasm" />
<PInvokeTableGenerator Modules="@(WasmPInvokeModule)"
Assemblies="@(WasmPInvokeAssembly)"
OutputPath="$(WasmPInvokeTablePath)" />
</Target>
<UsingTask TaskName="GenerateWasmBundle"
AssemblyFile="$(WasmBuildTasksAssemblyPath)"/>
<Target Name="BundleTimeZones">
<PropertyGroup>
<TimeZonesDataPath>$(NativeBinDir)dotnet.timezones.blat</TimeZonesDataPath>
</PropertyGroup>
<GenerateWasmBundle
InputDirectory="$([MSBuild]::NormalizePath('$(PkgSystem_Runtime_TimeZoneData)', 'contentFiles', 'any', 'any', 'data'))"
OutputFileName="$(TimeZonesDataPath)" />
</Target>
<Target Name="GenerateEmccPropsAndRspFiles">
<ItemGroup>
<_EmccLinkFlags Include="-s EXPORT_ES6=1" Condition="'$(WasmEnableES6)' == 'true'" />
<_EmccLinkFlags Include="-s ALLOW_MEMORY_GROWTH=1" />
<_EmccLinkFlags Include="-s NO_EXIT_RUNTIME=1" />
<_EmccLinkFlags Include="-s FORCE_FILESYSTEM=1" />
<_EmccLinkFlags Include="-s EXPORTED_RUNTIME_METHODS="['FS','print','ccall','cwrap','setValue','getValue','UTF8ToString','UTF8ArrayToString','FS_createPath','FS_createDataFile','removeRunDependency','addRunDependency', 'FS_readFile']"" />
<!-- _htons,_ntohs,__get_daylight,__get_timezone,__get_tzname are exported temporarily, until the issue is fixed in emscripten, https://github.com/dotnet/runtime/issues/64724 -->
<_EmccLinkFlags Include="-s EXPORTED_FUNCTIONS=_free,_malloc,_htons,_ntohs,__get_daylight,__get_timezone,__get_tzname,_memalign" />
<_EmccLinkFlags Include="--source-map-base http://example.com" />
<_EmccLinkFlags Include="-s STRICT_JS=1" />
<_EmccLinkFlags Include="-s EXPORT_NAME="'createDotnetRuntime'"" />
<_EmccLinkFlags Include="-s MODULARIZE=1"/>
<_EmccLinkFlags Include="-Wl,--allow-undefined"/>
<_EmccLinkFlags Include="-s ENVIRONMENT="web,webview,worker,node,shell"" />
</ItemGroup>
<ItemGroup Condition="'$(OS)' != 'Windows_NT'">
<_EmccLinkFlags Include="--profiling-funcs" />
<_EmccFlags Include="@(_EmccCommonFlags)" />
</ItemGroup>
<ItemGroup Condition="'$(OS)' == 'Windows_NT'">
<_EmccFlags Include="@(_EmccCommonFlags)" />
</ItemGroup>
<WriteLinesToFile File="$(_EmccDefaultsRspPath)"
Lines="@(_EmccFlags)"
WriteOnlyWhenDifferent="true"
Overwrite="true" />
<WriteLinesToFile File="$(_EmccCompileRspPath)"
Lines="@(_EmccCompileFlags)"
WriteOnlyWhenDifferent="true"
Overwrite="true" />
<WriteLinesToFile File="$(_EmccLinkRspPath)"
Lines="@(_EmccLinkFlags)"
WriteOnlyWhenDifferent="true"
Overwrite="true" />
<!-- Generate emcc-props.json -->
<RunWithEmSdkEnv Command="$(EmccCmd) --version"
ConsoleToMsBuild="true"
EmSdkPath="$(EMSDK_PATH)"
IgnoreStandardErrorWarningFormat="true">
<Output TaskParameter="ConsoleOutput" ItemName="_VersionLines" />
</RunWithEmSdkEnv>
<!-- we want to get the first line from the output, which has the version.
Rest of the lines are the license -->
<ItemGroup>
<_ReversedVersionLines Include="@(_VersionLines->Reverse())" />
</ItemGroup>
<PropertyGroup>
<_EmccVersionRaw>%(_ReversedVersionLines.Identity)</_EmccVersionRaw>
<_EmccVersionRegexPattern>^ *emcc \([^\)]+\) *([0-9\.]+).*\(([^\)]+)\)$</_EmccVersionRegexPattern>
<_EmccVersion>$([System.Text.RegularExpressions.Regex]::Match($(_EmccVersionRaw), $(_EmccVersionRegexPattern)).Groups[1].Value)</_EmccVersion>
<_EmccVersionHash>$([System.Text.RegularExpressions.Regex]::Match($(_EmccVersionRaw), $(_EmccVersionRegexPattern)).Groups[2].Value)</_EmccVersionHash>
<_EmccPropsJson>
<![CDATA[
{
"items": {
"EmccProperties": [
{ "identity": "RuntimeEmccVersion", "value": "$(_EmccVersion)" },
{ "identity": "RuntimeEmccVersionRaw", "value": "$(_EmccVersionRaw)" },
{ "identity": "RuntimeEmccVersionHash", "value": "$(_EmccVersionHash)" }
]
}
}
]]>
</_EmccPropsJson>
</PropertyGroup>
<Error Text="Failed to parse emcc version, and hash from the full version string: '$(_EmccVersionRaw)'"
Condition="'$(_EmccVersion)' == '' or '$(_EmccVersionHash)' == ''" />
<WriteLinesToFile File="$(NativeBinDir)src\emcc-props.json"
Lines="$(_EmccPropsJson)"
Overwrite="true"
WriteOnlyWhenDifferent="true" />
</Target>
<!-- This is a documented target that is invoked by developers in their innerloop work. -->
<Target Name="BuildWasmRuntimes"
AfterTargets="Build"
DependsOnTargets="GenerateEmccPropsAndRspFiles;BuildPInvokeTable;BundleTimeZones;InstallNpmPackages;BuildWithRollup">
<ItemGroup>
<ICULibNativeFiles Include="$(ICULibDir)/libicuuc.a;
$(ICULibDir)/libicui18n.a" />
<ICULibFiles Include="$(ICULibDir)/*.dat" />
</ItemGroup>
<PropertyGroup>
<PInvokeTableFile>$(ArtifactsObjDir)wasm/pinvoke-table.h</PInvokeTableFile>
<CMakeConfigurationEmccFlags Condition="'$(Configuration)' == 'Debug'">-g -Os -s -DDEBUG=1 -DENABLE_AOT_PROFILER=1</CMakeConfigurationEmccFlags>
<CMakeConfigurationEmccFlags Condition="'$(Configuration)' == 'Release'">-Oz</CMakeConfigurationEmccFlags>
<CMakeConfigurationLinkFlags Condition="'$(Configuration)' == 'Debug'" >$(CMakeConfigurationEmccFlags)</CMakeConfigurationLinkFlags>
<CMakeConfigurationLinkFlags Condition="'$(Configuration)' == 'Release'">-O2</CMakeConfigurationLinkFlags>
<CMakeConfigurationLinkFlags >$(CMakeConfigurationLinkFlags) --emit-symbol-map</CMakeConfigurationLinkFlags>
<CMakeConfigurationEmsdkPath Condition="'$(Configuration)' == 'Release'"> -DEMSDK_PATH="$(EMSDK_PATH.TrimEnd('\/'))"</CMakeConfigurationEmsdkPath>
<CMakeBuildRuntimeConfigureCmd>emcmake cmake $(MSBuildThisFileDirectory)runtime</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DCMAKE_BUILD_TYPE=$(Configuration)</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DCONFIGURATION_EMCC_FLAGS="$(CMakeConfigurationEmccFlags)"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DCONFIGURATION_LINK_FLAGS="$(CMakeConfigurationLinkFlags)"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DMONO_INCLUDES="$(MonoArtifactsPath)include/mono-2.0"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DMONO_OBJ_INCLUDES="$(MonoObjDir.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DICU_LIB_DIR="$(ICULibDir.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DMONO_ARTIFACTS_DIR="$(MonoArtifactsPath.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DNATIVE_BIN_DIR="$(NativeBinDir.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) $(CMakeConfigurationEmsdkPath)</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd Condition="'$(OS)' == 'Windows_NT'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && $(CMakeBuildRuntimeConfigureCmd)</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd Condition="'$(OS)' != 'Windows_NT'">bash -c 'source $(EMSDK_PATH)/emsdk_env.sh 2>&1 && $(CMakeBuildRuntimeConfigureCmd)'</CMakeBuildRuntimeConfigureCmd>
<CMakeOptions Condition="'$(MonoVerboseBuild)' != ''">-v</CMakeOptions>
<CMakeBuildRuntimeCmd>cmake --build . --config $(Configuration) $(CmakeOptions)</CMakeBuildRuntimeCmd>
<CMakeBuildRuntimeCmd Condition="'$(OS)' == 'Windows_NT'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && $(CMakeBuildRuntimeCmd)</CMakeBuildRuntimeCmd>
<CMakeBuildRuntimeCmd Condition="'$(OS)' != 'Windows_NT'">bash -c 'source $(EMSDK_PATH)/emsdk_env.sh 2>&1 && $(CMakeBuildRuntimeCmd)'</CMakeBuildRuntimeCmd>
</PropertyGroup>
<Copy SourceFiles="$(PInvokeTableFile)"
DestinationFolder="$(MonoObjDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime/driver.c;
runtime/pinvoke.c;
runtime/corebindings.c;
$(SharedNativeRoot)libs\System.Native\pal_random.lib.js;"
DestinationFolder="$(NativeBinDir)src"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime/cjs/dotnet.cjs.pre.js;
runtime/cjs/dotnet.cjs.lib.js;
runtime/cjs/dotnet.cjs.post.js;
runtime/cjs/dotnet.cjs.extpost.js;"
DestinationFolder="$(NativeBinDir)src/cjs"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime/es6/dotnet.es6.pre.js;
runtime/es6/dotnet.es6.lib.js;
runtime/es6/dotnet.es6.post.js;"
DestinationFolder="$(NativeBinDir)src/es6"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime\pinvoke.h"
DestinationFolder="$(NativeBinDir)include\wasm"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(ICULibFiles);
@(ICULibNativeFiles);"
DestinationFolder="$(NativeBinDir)"
SkipUnchangedFiles="true" />
<Exec Command="$(CMakeBuildRuntimeConfigureCmd)" WorkingDirectory="$(NativeBinDir)" />
<Exec Command="$(CMakeBuildRuntimeCmd)" WorkingDirectory="$(NativeBinDir)" />
<ItemGroup>
<IcuDataFiles Include="$(NativeBinDir)*.dat" />
<WasmSrcFiles Include="$(NativeBinDir)src\*.c;
$(NativeBinDir)src\*.js;
$(_EmccDefaultsRspPath);
$(_EmccCompileRspPath);
$(_EmccLinkRspPath);
$(NativeBinDir)src\emcc-props.json" />
<WasmSrcFilesCjs Include="$(NativeBinDir)src\cjs\*.js;" />
<WasmSrcFilesEs6 Include="$(NativeBinDir)src\es6\*.js;" />
<WasmHeaderFiles Include="$(NativeBinDir)include\wasm\*.h" />
</ItemGroup>
<Copy SourceFiles="$(NativeBinDir)dotnet.js;
$(NativeBinDir)dotnet.d.ts;
$(NativeBinDir)package.json;
$(NativeBinDir)dotnet.wasm;
$(NativeBinDir)dotnet.timezones.blat"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="$(NativeBinDir)dotnet.js.symbols"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(IcuDataFiles);@(ICULibNativeFiles)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmSrcFiles)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)src"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmSrcFilesCjs)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)src\cjs"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmSrcFilesEs6)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)src\es6"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmHeaderFiles)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)include\wasm"
SkipUnchangedFiles="true" />
</Target>
<Target Name="InstallNpmPackages"
Inputs="$(MonoProjectRoot)wasm/runtime/package.json"
Outputs="$(MonoProjectRoot)wasm/runtime/node_modules/.npm-stamp"
>
<!-- install typescript and rollup -->
<RunWithEmSdkEnv Condition="'$(ContinuousIntegrationBuild)' == 'true'" Command="npm ci" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<RunWithEmSdkEnv Condition="'$(ContinuousIntegrationBuild)' == 'true'" Command="npm audit" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<!-- npm install is faster on dev machine as it doesn't wipe node_modules folder -->
<RunWithEmSdkEnv Condition="'$(ContinuousIntegrationBuild)' != 'true'" Command="npm install" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<Touch Files="$(MonoProjectRoot)wasm/runtime/node_modules/.npm-stamp" AlwaysCreate="true" />
</Target>
<ItemGroup>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/*.ts"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/types/*.ts"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtimetypes/*.d.ts"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/*.json"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/*.js"/>
</ItemGroup>
<Target Name="BuildWithRollup"
Inputs="@(_RollupInputs)"
Outputs="$(NativeBinDir).rollup-stamp"
>
<!-- code style check -->
<RunWithEmSdkEnv Command="npm run lint" StandardOutputImportance="High" EmSdkPath="$(EMSDK_PATH)" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<!-- compile typescript -->
<RunWithEmSdkEnv Command="npm run rollup -- --environment Configuration:$(Configuration),NativeBinDir:$(NativeBinDir),ProductVersion:$(ProductVersion)" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<Copy SourceFiles="runtime/package.json;"
DestinationFolder="$(NativeBinDir)"
SkipUnchangedFiles="true" />
<!-- set version -->
<RunWithEmSdkEnv Command="npm version $(PackageVersion)" EmSdkPath="$(EMSDK_PATH)" WorkingDirectory="$(NativeBinDir)"/>
<Touch Files="$(NativeBinDir).rollup-stamp" AlwaysCreate="true" />
</Target>
</Project>
|
<Project Sdk="Microsoft.Build.NoTargets">
<UsingTask TaskName="Microsoft.WebAssembly.Build.Tasks.RunWithEmSdkEnv" AssemblyFile="$(WasmAppBuilderTasksAssemblyPath)" />
<PropertyGroup>
<!-- FIXME: clean up the duplication with libraries Directory.Build.props -->
<PackageRID>browser-wasm</PackageRID>
<NativeBinDir>$([MSBuild]::NormalizeDirectory('$(ArtifactsBinDir)', 'native', '$(NetCoreAppCurrent)-$(TargetOS)-$(Configuration)-$(TargetArchitecture)'))</NativeBinDir>
<ICULibDir>$([MSBuild]::NormalizeDirectory('$(PkgMicrosoft_NETCore_Runtime_ICU_Transport)', 'runtimes', 'browser-wasm', 'native', 'lib'))</ICULibDir>
<WasmEnableES6 Condition="'$(WasmEnableES6)' == ''">false</WasmEnableES6>
<FilterSystemTimeZones Condition="'$(FilterSystemTimeZones)' == ''">false</FilterSystemTimeZones>
<EmccCmd>emcc</EmccCmd>
<WasmObjDir>$(ArtifactsObjDir)wasm</WasmObjDir>
<_EmccDefaultsRspPath>$(NativeBinDir)src\emcc-default.rsp</_EmccDefaultsRspPath>
<_EmccCompileRspPath>$(NativeBinDir)src\emcc-compile.rsp</_EmccCompileRspPath>
<_EmccLinkRspPath>$(NativeBinDir)src\emcc-link.rsp</_EmccLinkRspPath>
<WasmNativeStrip Condition="'$(ContinuousIntegrationBuild)' == 'true'">false</WasmNativeStrip>
</PropertyGroup>
<Target Name="CheckEnv">
<Error Condition="'$(TargetArchitecture)' != 'wasm'" Text="Expected TargetArchitecture==wasm, got '$(TargetArchitecture)'"/>
<Error Condition="'$(TargetOS)' != 'Browser'" Text="Expected TargetOS==Browser, got '$(TargetOS)'"/>
<Error Condition="'$(EMSDK_PATH)' == ''" Text="The EMSDK_PATH environment variable should be set pointing to the emscripten SDK root dir."/>
</Target>
<ItemGroup>
<PackageReference Include="Microsoft.NETCore.Runtime.ICU.Transport" PrivateAssets="all" Version="$(MicrosoftNETCoreRuntimeICUTransportVersion)" GeneratePathProperty="true" />
<PackageReference Include="System.Runtime.TimeZoneData" PrivateAssets="all" Version="$(SystemRuntimeTimeZoneDataVersion)" GeneratePathProperty="true" />
</ItemGroup>
<UsingTask TaskName="PInvokeTableGenerator" AssemblyFile="$(WasmAppBuilderTasksAssemblyPath)"/>
<Target Name="BuildPInvokeTable" DependsOnTargets="CheckEnv;ResolveLibrariesFromLocalBuild">
<PropertyGroup>
<WasmPInvokeTablePath>$(ArtifactsObjDir)wasm\pinvoke-table.h</WasmPInvokeTablePath>
</PropertyGroup>
<ItemGroup>
<WasmPInvokeModule Include="libSystem.Native" />
<WasmPInvokeModule Include="libSystem.IO.Compression.Native" />
<WasmPInvokeModule Include="libSystem.Globalization.Native" />
<WasmPInvokeAssembly Include="@(LibrariesRuntimeFiles)" Condition="'%(Extension)' == '.dll' and '%(IsNative)' != 'true'" />
</ItemGroup>
<!-- Retrieve CoreLib's targetpath via GetTargetPath as it isn't binplaced yet. -->
<MSBuild Projects="$(CoreLibProject)"
Targets="GetTargetPath">
<Output TaskParameter="TargetOutputs" ItemName="WasmPInvokeAssembly" />
</MSBuild>
<MakeDir Directories="$(ArtifactsObjDir)wasm" />
<PInvokeTableGenerator Modules="@(WasmPInvokeModule)"
Assemblies="@(WasmPInvokeAssembly)"
OutputPath="$(WasmPInvokeTablePath)" />
</Target>
<UsingTask TaskName="GenerateWasmBundle"
AssemblyFile="$(WasmBuildTasksAssemblyPath)"/>
<Target Name="BundleTimeZones">
<PropertyGroup>
<TimeZonesDataPath>$(NativeBinDir)dotnet.timezones.blat</TimeZonesDataPath>
</PropertyGroup>
<GenerateWasmBundle
InputDirectory="$([MSBuild]::NormalizePath('$(PkgSystem_Runtime_TimeZoneData)', 'contentFiles', 'any', 'any', 'data'))"
OutputFileName="$(TimeZonesDataPath)" />
</Target>
<Target Name="GenerateEmccPropsAndRspFiles">
<!-- Generate emcc-props.json -->
<RunWithEmSdkEnv Command="$(EmccCmd) --version"
ConsoleToMsBuild="true"
EmSdkPath="$(EMSDK_PATH)"
IgnoreStandardErrorWarningFormat="true">
<Output TaskParameter="ConsoleOutput" ItemName="_VersionLines" />
</RunWithEmSdkEnv>
<!-- we want to get the first line from the output, which has the version.
Rest of the lines are the license -->
<ItemGroup>
<_ReversedVersionLines Include="@(_VersionLines->Reverse())" />
</ItemGroup>
<PropertyGroup>
<_EmccVersionRaw>%(_ReversedVersionLines.Identity)</_EmccVersionRaw>
<_EmccVersionRegexPattern>^ *emcc \([^\)]+\) *([0-9\.]+).*\(([^\)]+)\)$</_EmccVersionRegexPattern>
<_EmccVersion>$([System.Text.RegularExpressions.Regex]::Match($(_EmccVersionRaw), $(_EmccVersionRegexPattern)).Groups[1].Value)</_EmccVersion>
<_EmccVersionHash>$([System.Text.RegularExpressions.Regex]::Match($(_EmccVersionRaw), $(_EmccVersionRegexPattern)).Groups[2].Value)</_EmccVersionHash>
</PropertyGroup>
<Error Text="Failed to parse emcc version, and hash from the full version string: '$(_EmccVersionRaw)'"
Condition="'$(_EmccVersion)' == '' or '$(_EmccVersionHash)' == ''" />
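    <!-- Illustrative note, not part of the original project file: given a hypothetical emcc version banner
         line such as "emcc (Emscripten gcc/clang-like replacement + linker emulating GNU ld) 3.1.4 (abc1234)",
         the regex above yields _EmccVersion=3.1.4 and _EmccVersionHash=abc1234. -->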
<PropertyGroup>
<_EmccPropsJson>
<![CDATA[
{
"items": {
"EmccProperties": [
{ "identity": "RuntimeEmccVersion", "value": "$(_EmccVersion)" },
{ "identity": "RuntimeEmccVersionRaw", "value": "$(_EmccVersionRaw)" },
{ "identity": "RuntimeEmccVersionHash", "value": "$(_EmccVersionHash)" }
]
}
}
]]>
</_EmccPropsJson>
</PropertyGroup>
<WriteLinesToFile File="$(NativeBinDir)src\emcc-props.json"
Lines="$(_EmccPropsJson)"
Overwrite="true"
WriteOnlyWhenDifferent="true" />
<ItemGroup>
<_EmccLinkFlags Include="-s EXPORT_ES6=1" Condition="'$(WasmEnableES6)' == 'true'" />
<_EmccLinkFlags Include="-s ALLOW_MEMORY_GROWTH=1" />
<_EmccLinkFlags Include="-s NO_EXIT_RUNTIME=1" />
<_EmccLinkFlags Include="-s FORCE_FILESYSTEM=1" />
<_EmccLinkFlags Include="-s EXPORTED_RUNTIME_METHODS="['FS','print','ccall','cwrap','setValue','getValue','UTF8ToString','UTF8ArrayToString','FS_createPath','FS_createDataFile','removeRunDependency','addRunDependency', 'FS_readFile']"" />
<!-- _htons,_ntohs,__get_daylight,__get_timezone,__get_tzname are exported temporarily, until the issue is fixed in emscripten, https://github.com/dotnet/runtime/issues/64724 -->
<_EmccLinkFlags Include="-s EXPORTED_FUNCTIONS=_free,_malloc,_memalign,_memset" Condition="$([MSBuild]::VersionGreaterThan('$(_EmccVersion)', '3.0'))" />
<_EmccLinkFlags Include="-s EXPORTED_FUNCTIONS=_free,_malloc,_htons,_ntohs,__get_daylight,__get_timezone,__get_tzname,_memalign" Condition="$([MSBuild]::VersionLessThan('$(_EmccVersion)', '3.0'))" />
<_EmccLinkFlags Include="--source-map-base http://example.com" />
<_EmccLinkFlags Include="-s STRICT_JS=1" />
<_EmccLinkFlags Include="-s EXPORT_NAME="'createDotnetRuntime'"" />
<_EmccLinkFlags Include="-s MODULARIZE=1"/>
<_EmccLinkFlags Include="-Wl,--allow-undefined"/>
<_EmccLinkFlags Include="-s ENVIRONMENT="web,webview,worker,node,shell"" />
</ItemGroup>
<ItemGroup Condition="'$(OS)' != 'Windows_NT'">
<_EmccLinkFlags Include="--profiling-funcs" />
<_EmccFlags Include="@(_EmccCommonFlags)" />
</ItemGroup>
<ItemGroup Condition="'$(OS)' == 'Windows_NT'">
<_EmccFlags Include="@(_EmccCommonFlags)" />
</ItemGroup>
<WriteLinesToFile File="$(_EmccDefaultsRspPath)"
Lines="@(_EmccFlags)"
WriteOnlyWhenDifferent="true"
Overwrite="true" />
<WriteLinesToFile File="$(_EmccCompileRspPath)"
Lines="@(_EmccCompileFlags)"
WriteOnlyWhenDifferent="true"
Overwrite="true" />
<WriteLinesToFile File="$(_EmccLinkRspPath)"
Lines="@(_EmccLinkFlags)"
WriteOnlyWhenDifferent="true"
Overwrite="true" />
</Target>
<!-- This is a documented target that is invoked by developers in their innerloop work. -->
<Target Name="BuildWasmRuntimes"
AfterTargets="Build"
DependsOnTargets="GenerateEmccPropsAndRspFiles;BuildPInvokeTable;BundleTimeZones;InstallNpmPackages;BuildWithRollup">
<ItemGroup>
<ICULibNativeFiles Include="$(ICULibDir)/libicuuc.a;
$(ICULibDir)/libicui18n.a" />
<ICULibFiles Include="$(ICULibDir)/*.dat" />
</ItemGroup>
<PropertyGroup>
<PInvokeTableFile>$(ArtifactsObjDir)wasm/pinvoke-table.h</PInvokeTableFile>
<CMakeConfigurationEmccFlags Condition="'$(Configuration)' == 'Debug'">-g -Os -s -DDEBUG=1 -DENABLE_AOT_PROFILER=1</CMakeConfigurationEmccFlags>
<CMakeConfigurationEmccFlags Condition="'$(Configuration)' == 'Release'">-Oz</CMakeConfigurationEmccFlags>
<CMakeConfigurationLinkFlags Condition="'$(Configuration)' == 'Debug'" >$(CMakeConfigurationEmccFlags)</CMakeConfigurationLinkFlags>
<CMakeConfigurationLinkFlags Condition="'$(Configuration)' == 'Release'">-O2</CMakeConfigurationLinkFlags>
<CMakeConfigurationLinkFlags >$(CMakeConfigurationLinkFlags) --emit-symbol-map</CMakeConfigurationLinkFlags>
<CMakeConfigurationEmsdkPath Condition="'$(Configuration)' == 'Release'"> -DEMSDK_PATH="$(EMSDK_PATH.TrimEnd('\/'))"</CMakeConfigurationEmsdkPath>
<CMakeBuildRuntimeConfigureCmd>emcmake cmake $(MSBuildThisFileDirectory)runtime</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DCMAKE_BUILD_TYPE=$(Configuration)</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DCONFIGURATION_EMCC_FLAGS="$(CMakeConfigurationEmccFlags)"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DCONFIGURATION_LINK_FLAGS="$(CMakeConfigurationLinkFlags)"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DMONO_INCLUDES="$(MonoArtifactsPath)include/mono-2.0"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DMONO_OBJ_INCLUDES="$(MonoObjDir.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DICU_LIB_DIR="$(ICULibDir.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DMONO_ARTIFACTS_DIR="$(MonoArtifactsPath.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) -DNATIVE_BIN_DIR="$(NativeBinDir.TrimEnd('\/'))"</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd>$(CMakeBuildRuntimeConfigureCmd) $(CMakeConfigurationEmsdkPath)</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd Condition="'$(OS)' == 'Windows_NT'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && $(CMakeBuildRuntimeConfigureCmd)</CMakeBuildRuntimeConfigureCmd>
<CMakeBuildRuntimeConfigureCmd Condition="'$(OS)' != 'Windows_NT'">bash -c 'source $(EMSDK_PATH)/emsdk_env.sh 2>&1 && $(CMakeBuildRuntimeConfigureCmd)'</CMakeBuildRuntimeConfigureCmd>
<CMakeOptions Condition="'$(MonoVerboseBuild)' != ''">-v</CMakeOptions>
<CMakeBuildRuntimeCmd>cmake --build . --config $(Configuration) $(CmakeOptions)</CMakeBuildRuntimeCmd>
<CMakeBuildRuntimeCmd Condition="'$(OS)' == 'Windows_NT'">call "$(RepositoryEngineeringDir)native\init-vs-env.cmd" && call "$([MSBuild]::NormalizePath('$(EMSDK_PATH)', 'emsdk_env.bat'))" && $(CMakeBuildRuntimeCmd)</CMakeBuildRuntimeCmd>
<CMakeBuildRuntimeCmd Condition="'$(OS)' != 'Windows_NT'">bash -c 'source $(EMSDK_PATH)/emsdk_env.sh 2>&1 && $(CMakeBuildRuntimeCmd)'</CMakeBuildRuntimeCmd>
</PropertyGroup>
<Copy SourceFiles="$(PInvokeTableFile)"
DestinationFolder="$(MonoObjDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime/driver.c;
runtime/pinvoke.c;
runtime/corebindings.c;
$(SharedNativeRoot)libs\System.Native\pal_random.lib.js;"
DestinationFolder="$(NativeBinDir)src"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime/cjs/dotnet.cjs.pre.js;
runtime/cjs/dotnet.cjs.lib.js;
runtime/cjs/dotnet.cjs.post.js;
runtime/cjs/dotnet.cjs.extpost.js;"
DestinationFolder="$(NativeBinDir)src/cjs"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime/es6/dotnet.es6.pre.js;
runtime/es6/dotnet.es6.lib.js;
runtime/es6/dotnet.es6.post.js;"
DestinationFolder="$(NativeBinDir)src/es6"
SkipUnchangedFiles="true" />
<Copy SourceFiles="runtime\pinvoke.h"
DestinationFolder="$(NativeBinDir)include\wasm"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(ICULibFiles);
@(ICULibNativeFiles);"
DestinationFolder="$(NativeBinDir)"
SkipUnchangedFiles="true" />
<Exec Command="$(CMakeBuildRuntimeConfigureCmd)" WorkingDirectory="$(NativeBinDir)" />
<Exec Command="$(CMakeBuildRuntimeCmd)" WorkingDirectory="$(NativeBinDir)" />
<ItemGroup>
<IcuDataFiles Include="$(NativeBinDir)*.dat" />
<WasmSrcFiles Include="$(NativeBinDir)src\*.c;
$(NativeBinDir)src\*.js;
$(_EmccDefaultsRspPath);
$(_EmccCompileRspPath);
$(_EmccLinkRspPath);
$(NativeBinDir)src\emcc-props.json" />
<WasmSrcFilesCjs Include="$(NativeBinDir)src\cjs\*.js;" />
<WasmSrcFilesEs6 Include="$(NativeBinDir)src\es6\*.js;" />
<WasmHeaderFiles Include="$(NativeBinDir)include\wasm\*.h" />
</ItemGroup>
<Copy SourceFiles="$(NativeBinDir)dotnet.js;
$(NativeBinDir)dotnet.d.ts;
$(NativeBinDir)package.json;
$(NativeBinDir)dotnet.wasm;
$(NativeBinDir)dotnet.timezones.blat"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="$(NativeBinDir)dotnet.js.symbols"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(IcuDataFiles);@(ICULibNativeFiles)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmSrcFiles)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)src"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmSrcFilesCjs)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)src\cjs"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmSrcFilesEs6)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)src\es6"
SkipUnchangedFiles="true" />
<Copy SourceFiles="@(WasmHeaderFiles)"
DestinationFolder="$(MicrosoftNetCoreAppRuntimePackNativeDir)include\wasm"
SkipUnchangedFiles="true" />
</Target>
<Target Name="InstallNpmPackages"
Inputs="$(MonoProjectRoot)wasm/runtime/package.json"
Outputs="$(MonoProjectRoot)wasm/runtime/node_modules/.npm-stamp"
>
<!-- install typescript and rollup -->
<RunWithEmSdkEnv Condition="'$(ContinuousIntegrationBuild)' == 'true'" Command="npm ci" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<RunWithEmSdkEnv Condition="'$(ContinuousIntegrationBuild)' == 'true'" Command="npm audit" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<!-- npm install is faster on dev machine as it doesn't wipe node_modules folder -->
<RunWithEmSdkEnv Condition="'$(ContinuousIntegrationBuild)' != 'true'" Command="npm install" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<Touch Files="$(MonoProjectRoot)wasm/runtime/node_modules/.npm-stamp" AlwaysCreate="true" />
</Target>
<ItemGroup>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/*.ts"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/types/*.ts"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtimetypes/*.d.ts"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/*.json"/>
<_RollupInputs Include="$(MonoProjectRoot)wasm/runtime/*.js"/>
</ItemGroup>
<Target Name="BuildWithRollup"
Inputs="@(_RollupInputs)"
Outputs="$(NativeBinDir).rollup-stamp"
>
<!-- code style check -->
<RunWithEmSdkEnv Command="npm run lint" StandardOutputImportance="High" EmSdkPath="$(EMSDK_PATH)" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<!-- compile typescript -->
<RunWithEmSdkEnv Command="npm run rollup -- --environment Configuration:$(Configuration),NativeBinDir:$(NativeBinDir),ProductVersion:$(ProductVersion)" EmSdkPath="$(EMSDK_PATH)" IgnoreStandardErrorWarningFormat="true" WorkingDirectory="$(MonoProjectRoot)wasm/runtime/"/>
<Copy SourceFiles="runtime/package.json;"
DestinationFolder="$(NativeBinDir)"
SkipUnchangedFiles="true" />
<!-- set version -->
<RunWithEmSdkEnv Command="npm version $(PackageVersion)" EmSdkPath="$(EMSDK_PATH)" WorkingDirectory="$(NativeBinDir)"/>
<Touch Files="$(NativeBinDir).rollup-stamp" AlwaysCreate="true" />
</Target>
</Project>
| 1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/native/libs/CMakeLists.txt
|
cmake_minimum_required(VERSION 3.6.2)
include(CheckCCompilerFlag)
if (CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
# CMake 3.14.5 contains bug fixes for iOS
cmake_minimum_required(VERSION 3.14.5)
elseif (CLR_CMAKE_TARGET_MACCATALYST)
# CMake 3.18.1 properly generates MacCatalyst C compiler
cmake_minimum_required(VERSION 3.18.1)
endif ()
if (WIN32)
cmake_policy(SET CMP0091 NEW)
else ()
cmake_policy(SET CMP0042 NEW)
endif ()
project(CoreFX C)
include(../../../eng/native/configurepaths.cmake)
include(${CLR_ENG_NATIVE_DIR}/configurecompiler.cmake)
include_directories(${CLR_SRC_NATIVE_DIR})
set(CMAKE_INCLUDE_CURRENT_DIR ON)
if (STATIC_LIBS_ONLY)
# Suppress exporting of the PAL APIs
add_definitions(-DPALEXPORT=EXTERN_C)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION lib)
else ()
set(GEN_SHARED_LIB 1)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
set(CMAKE_MACOSX_RPATH ON)
if (CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
set(CMAKE_INSTALL_NAME_DIR "@rpath")
endif ()
set(CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99")
add_compile_options(-I${CMAKE_CURRENT_SOURCE_DIR}/Common)
add_compile_options(-I${CMAKE_CURRENT_BINARY_DIR}/Common)
if (CLR_CMAKE_TARGET_BROWSER)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_TVOS)
# with -fembed-bitcode passing -headerpad_max_install_names is not allowed so remove it from the CMake flags
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_C_LINK_FLAGS ${CMAKE_C_LINK_FLAGS})
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS ${CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS})
add_compile_options(-fembed-bitcode)
add_link_options(-fembed-bitcode)
endif ()
if (CLR_CMAKE_TARGET_ANDROID)
if (CROSS_ROOTFS)
include_directories(SYSTEM "${CROSS_ROOTFS}/usr/include")
endif ()
endif ()
string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
add_compile_options(-O0)
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
add_compile_options(-O2)
endif ()
add_definitions(-DDEBUG)
# obtain settings from running coreclr\enablesanitizers.sh
string(FIND "$ENV{DEBUG_SANITIZERS}" "asan" __ASAN_POS)
string(FIND "$ENV{DEBUG_SANITIZERS}" "ubsan" __UBSAN_POS)
if (${__ASAN_POS} GREATER -1 OR ${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS} -fsanitize=")
if (${__ASAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}address,")
message("Address Sanitizer (asan) enabled")
endif ()
if (${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}undefined")
message("Undefined Behavior Sanitizer (ubsan) enabled")
endif ()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS}")
# -Wl and --gc-sections: drop unused sections\functions (similar to Windows /Gy function-level-linking)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS} -Wl,--gc-sections")
endif ()
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELEASE)
# Use O1 option when the clang version is smaller than 3.9
# Otherwise use O3 option in release build
if (CLR_CMAKE_TARGET_ARCH_ARMV7L AND DEFINED ENV{CROSSCOMPILE} AND CMAKE_C_COMPILER_VERSION VERSION_LESS 3.9)
add_compile_options (-O1)
else ()
add_compile_options (-O3)
endif ()
else ()
message(FATAL_ERROR "Unknown build type. Set CMAKE_BUILD_TYPE to DEBUG or RELEASE.")
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_definitions(-D__APPLE_USE_RFC_3542)
endif ()
if (CLR_CMAKE_TARGET_LINUX)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_GNU_SOURCE")
endif ()
else ()
set(CMAKE_SHARED_LIBRARY_PREFIX "")
# we only need to build System.Globalization.Native when building static libs.
if (STATIC_LIBS_ONLY)
add_subdirectory(System.Globalization.Native)
endif ()
endif ()
add_subdirectory(System.IO.Compression.Native)
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
include(configure.cmake)
if (NOT CLR_CMAKE_TARGET_BROWSER AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS AND NOT CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.IO.Ports.Native)
endif ()
if (CMAKE_C_COMPILER_ID STREQUAL Clang)
add_compile_options(-Weverything)
add_compile_options(-Wno-format-nonliteral)
add_compile_options(-Wno-disabled-macro-expansion)
add_compile_options(-Wno-padded)
add_compile_options(-Wno-empty-translation-unit)
add_compile_options(-Wno-cast-align)
add_compile_options(-Wno-typedef-redefinition)
add_compile_options(-Wno-c11-extensions)
add_compile_options(-Wno-thread-safety-analysis)
endif ()
add_subdirectory(System.Native)
if (CLR_CMAKE_TARGET_BROWSER)
# skip for now
elseif (CLR_CMAKE_TARGET_MACCATALYST)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_IOS)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_TVOS)
#add_subdirectory(System.Net.Security.Native) # no gssapi on tvOS, see https://developer.apple.com/documentation/gss
# System.Security.Cryptography.Native is intentionally disabled on tvOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.Security.Cryptography.Native.Android)
else ()
add_subdirectory(System.Globalization.Native)
add_subdirectory(System.Net.Security.Native)
add_subdirectory(System.Security.Cryptography.Native)
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_subdirectory(System.Security.Cryptography.Native.Apple)
endif ()
endif ()
|
cmake_minimum_required(VERSION 3.6.2)
include(CheckCCompilerFlag)
if (CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
# CMake 3.14.5 contains bug fixes for iOS
cmake_minimum_required(VERSION 3.14.5)
elseif (CLR_CMAKE_TARGET_MACCATALYST)
# CMake 3.18.1 properly generates MacCatalyst C compiler
cmake_minimum_required(VERSION 3.18.1)
endif ()
if (WIN32)
cmake_policy(SET CMP0091 NEW)
else ()
cmake_policy(SET CMP0042 NEW)
endif ()
project(CoreFX C)
include(../../../eng/native/configurepaths.cmake)
include(${CLR_ENG_NATIVE_DIR}/configurecompiler.cmake)
include_directories(${CLR_SRC_NATIVE_DIR})
set(CMAKE_INCLUDE_CURRENT_DIR ON)
if (STATIC_LIBS_ONLY)
# Suppress exporting of the PAL APIs
add_definitions(-DPALEXPORT=EXTERN_C)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION lib)
else ()
set(GEN_SHARED_LIB 1)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
set(CMAKE_MACOSX_RPATH ON)
if (CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
set(CMAKE_BUILD_WITH_INSTALL_NAME_DIR ON)
set(CMAKE_INSTALL_NAME_DIR "@rpath")
endif ()
set(CMAKE_INSTALL_PREFIX $ENV{__CMakeBinDir})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99 -Wno-declaration-after-statement")
add_compile_options(-I${CMAKE_CURRENT_SOURCE_DIR}/Common)
add_compile_options(-I${CMAKE_CURRENT_BINARY_DIR}/Common)
if (CLR_CMAKE_TARGET_BROWSER)
set(GEN_SHARED_LIB 0)
set(STATIC_LIB_DESTINATION .)
endif ()
if (CLR_CMAKE_TARGET_TVOS)
# with -fembed-bitcode passing -headerpad_max_install_names is not allowed so remove it from the CMake flags
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_C_LINK_FLAGS ${CMAKE_C_LINK_FLAGS})
string(REPLACE "-Wl,-headerpad_max_install_names" "" CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS ${CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS})
add_compile_options(-fembed-bitcode)
add_link_options(-fembed-bitcode)
endif ()
if (CLR_CMAKE_TARGET_ANDROID)
if (CROSS_ROOTFS)
include_directories(SYSTEM "${CROSS_ROOTFS}/usr/include")
endif ()
endif ()
string(TOUPPER ${CMAKE_BUILD_TYPE} UPPERCASE_CMAKE_BUILD_TYPE)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG OR UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
if (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL DEBUG)
add_compile_options(-O0)
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL CHECKED)
add_compile_options(-O2)
endif ()
add_definitions(-DDEBUG)
# obtain settings from running coreclr\enablesanitizers.sh
string(FIND "$ENV{DEBUG_SANITIZERS}" "asan" __ASAN_POS)
string(FIND "$ENV{DEBUG_SANITIZERS}" "ubsan" __UBSAN_POS)
if (${__ASAN_POS} GREATER -1 OR ${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS} -fsanitize=")
if (${__ASAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}address,")
message("Address Sanitizer (asan) enabled")
endif ()
if (${__UBSAN_POS} GREATER -1)
set(CLR_SANITIZE_LINK_FLAGS "${CLR_SANITIZE_LINK_FLAGS}undefined")
message("Undefined Behavior Sanitizer (ubsan) enabled")
endif ()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS}")
# -Wl and --gc-sections: drop unused sections\functions (similar to Windows /Gy function-level-linking)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CLR_SANITIZE_LINK_FLAGS} -Wl,--gc-sections")
endif ()
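  # Illustrative note, not part of the original file: with DEBUG_SANITIZERS="asan ubsan" set in the
  # environment, the block above appends "-fsanitize=address,undefined" to the executable link flags
  # and the same plus -Wl,--gc-sections to the shared-library link flags.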
elseif (UPPERCASE_CMAKE_BUILD_TYPE STREQUAL RELEASE)
# Use O1 option when the clang version is smaller than 3.9
# Otherwise use O3 option in release build
if (CLR_CMAKE_TARGET_ARCH_ARMV7L AND DEFINED ENV{CROSSCOMPILE} AND CMAKE_C_COMPILER_VERSION VERSION_LESS 3.9)
add_compile_options (-O1)
else ()
add_compile_options (-O3)
endif ()
else ()
message(FATAL_ERROR "Unknown build type. Set CMAKE_BUILD_TYPE to DEBUG or RELEASE.")
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_definitions(-D__APPLE_USE_RFC_3542)
endif ()
if (CLR_CMAKE_TARGET_LINUX)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_GNU_SOURCE")
endif ()
else ()
set(CMAKE_SHARED_LIBRARY_PREFIX "")
# we only need to build System.Globalization.Native when building static libs.
if (STATIC_LIBS_ONLY)
add_subdirectory(System.Globalization.Native)
endif ()
endif ()
add_subdirectory(System.IO.Compression.Native)
if (CLR_CMAKE_TARGET_UNIX OR CLR_CMAKE_TARGET_BROWSER)
include(configure.cmake)
if (NOT CLR_CMAKE_TARGET_BROWSER AND NOT CLR_CMAKE_TARGET_MACCATALYST AND NOT CLR_CMAKE_TARGET_IOS AND NOT CLR_CMAKE_TARGET_TVOS AND NOT CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.IO.Ports.Native)
endif ()
if (CMAKE_C_COMPILER_ID STREQUAL Clang)
add_compile_options(-Weverything)
add_compile_options(-Wno-format-nonliteral)
add_compile_options(-Wno-disabled-macro-expansion)
add_compile_options(-Wno-padded)
add_compile_options(-Wno-empty-translation-unit)
add_compile_options(-Wno-cast-align)
add_compile_options(-Wno-typedef-redefinition)
add_compile_options(-Wno-c11-extensions)
add_compile_options(-Wno-thread-safety-analysis)
endif ()
add_subdirectory(System.Native)
if (CLR_CMAKE_TARGET_BROWSER)
# skip for now
elseif (CLR_CMAKE_TARGET_MACCATALYST)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_IOS)
add_subdirectory(System.Net.Security.Native)
# System.Security.Cryptography.Native is intentionally disabled on iOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_TVOS)
#add_subdirectory(System.Net.Security.Native) # no gssapi on tvOS, see https://developer.apple.com/documentation/gss
# System.Security.Cryptography.Native is intentionally disabled on tvOS
# it is only used for interacting with OpenSSL which isn't useful there
elseif (CLR_CMAKE_TARGET_ANDROID)
add_subdirectory(System.Security.Cryptography.Native.Android)
else ()
add_subdirectory(System.Globalization.Native)
add_subdirectory(System.Net.Security.Native)
add_subdirectory(System.Security.Cryptography.Native)
endif ()
if (CLR_CMAKE_TARGET_OSX OR CLR_CMAKE_TARGET_MACCATALYST OR CLR_CMAKE_TARGET_IOS OR CLR_CMAKE_TARGET_TVOS)
add_subdirectory(System.Security.Cryptography.Native.Apple)
endif ()
endif ()
| 1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/bft10.txt
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/cnt26.txt
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./docs/issue-mappings/core-setup.mapping.txt
|
https://github.com/dotnet/core-setup/issues/XXXX ---> https://github.com/dotnet/runtime/issues/YYYY
18 ---> 2412
20 ---> 2413
23 ---> 2414
25 ---> 2415
26 ---> 2416
27 ---> 2417
33 ---> 2418
34 ---> 2419
37 ---> 2420
40 ---> 2421
41 ---> 2422
42 ---> 2423
71 ---> 2424
72 ---> 2425
80 ---> 2426
84 ---> 2427
89 ---> 2428
94 ---> 2429
102 ---> 2430
108 ---> 2431
110 ---> 2432
118 ---> 2433
119 ---> 2434
121 ---> 2435
150 ---> 2436
154 ---> 2437
158 ---> 2438
161 ---> 2439
165 ---> 2440
167 ---> 2441
169 ---> 2442
172 ---> 2443
174 ---> 2444
176 ---> 2445
177 ---> 2446
181 ---> 2447
183 ---> 2448
185 ---> 2449
186 ---> 2450
189 ---> 2451
191 ---> 2452
193 ---> 2453
194 ---> 2454
196 ---> 2455
198 ---> 2456
200 ---> 2457
204 ---> 2458
211 ---> 2459
215 ---> 2460
216 ---> 2461
217 ---> 2462
223 ---> 2463
225 ---> 2464
228 ---> 2465
229 ---> 2466
230 ---> 2467
233 ---> 2468
234 ---> 2469
238 ---> 2470
253 ---> 2471
256 ---> 2472
259 ---> 2473
274 ---> 2475
275 ---> 2476
276 ---> 2477
279 ---> 2478
283 ---> 2479
298 ---> 2480
300 ---> 2481
306 ---> 2482
325 ---> 2483
327 ---> 2484
347 ---> 2485
361 ---> 2486
364 ---> 2487
378 ---> 2488
385 ---> 2489
390 ---> 2490
392 ---> 2491
400 ---> 2492
402 ---> 2493
409 ---> 2494
421 ---> 2495
422 ---> 2496
423 ---> 2497
432 ---> 2498
436 ---> 2499
438 ---> 2500
444 ---> 2501
455 ---> 2502
456 ---> 2503
458 ---> 2504
459 ---> 2505
461 ---> 2506
467 ---> 2507
472 ---> 2508
499 ---> 2509
510 ---> 2510
514 ---> 2511
515 ---> 2512
538 ---> 2513
544 ---> 2514
545 ---> 2515
565 ---> 2516
570 ---> 2517
572 ---> 2518
579 ---> 2519
595 ---> 2520
617 ---> 2521
618 ---> 2522
619 ---> 2523
620 ---> 2524
622 ---> 2525
623 ---> 2526
624 ---> 2527
625 ---> 2528
626 ---> 2529
627 ---> 2530
628 ---> 2531
634 ---> 2532
635 ---> 2533
636 ---> 2534
637 ---> 2535
638 ---> 2536
650 ---> 2537
651 ---> 2538
675 ---> 2539
676 ---> 2540
679 ---> 2541
680 ---> 2542
685 ---> 2543
695 ---> 2544
715 ---> 2545
721 ---> 2546
723 ---> 2547
725 ---> 2548
729 ---> 2550
734 ---> 2551
736 ---> 2552
753 ---> 2553
764 ---> 2554
789 ---> 2555
790 ---> 2556
801 ---> 2557
811 ---> 2558
813 ---> 2559
847 ---> 2560
849 ---> 2561
934 ---> 2562
949 ---> 2563
1017 ---> 2564
1021 ---> 2565
1030 ---> 2566
1121 ---> 2567
1126 ---> 2568
1183 ---> 2569
1192 ---> 2570
1217 ---> 2571
1219 ---> 2572
1221 ---> 2573
1229 ---> 2574
1230 ---> 2575
1231 ---> 2576
1267 ---> 2577
1285 ---> 2578
1288 ---> 2579
1322 ---> 2580
1378 ---> 2581
1386 ---> 2582
1400 ---> 2583
1408 ---> 2584
1411 ---> 2585
1432 ---> 2586
1454 ---> 2587
1461 ---> 2588
1484 ---> 2589
1489 ---> 2590
1490 ---> 2591
1503 ---> 2592
1504 ---> 2593
1507 ---> 2594
1519 ---> 2595
1530 ---> 2401
1532 ---> 2596
1548 ---> 2597
1550 ---> 2598
1551 ---> 2599
1553 ---> 2600
1555 ---> 2601
1557 ---> 2602
1559 ---> 2603
1561 ---> 2604
1562 ---> 2605
1564 ---> 2606
1565 ---> 2607
1566 ---> 2608
1570 ---> 2609
1573 ---> 2610
1574 ---> 2611
1575 ---> 2612
1578 ---> 2613
1581 ---> 2614
1589 ---> 2615
1590 ---> 2616
1591 ---> 2617
1593 ---> 2618
1596 ---> 2619
1597 ---> 2620
1598 ---> 2621
1599 ---> 2622
1605 ---> 2623
1607 ---> 2624
1609 ---> 2625
1614 ---> 2626
1620 ---> 2627
1622 ---> 2628
1632 ---> 2629
1636 ---> 2630
1638 ---> 2631
1645 ---> 2632
1646 ---> 2633
1651 ---> 2044
1653 ---> 2634
1664 ---> 2635
1673 ---> 2636
1674 ---> 2637
1707 ---> 2638
1719 ---> 2639
1721 ---> 2640
1722 ---> 2641
1725 ---> 2642
1739 ---> 2643
1740 ---> 2644
1741 ---> 2645
1749 ---> 2646
1755 ---> 2647
1756 ---> 2648
1757 ---> 2649
1758 ---> 2650
1760 ---> 2651
1762 ---> 2652
1801 ---> 2653
1805 ---> 2654
1810 ---> 2655
1816 ---> 2656
1824 ---> 2657
1825 ---> 2658
1832 ---> 2659
1835 ---> 2660
1840 ---> 2661
1842 ---> 2662
1846 ---> 2663
1848 ---> 2664
1856 ---> 2665
1868 ---> 2666
1871 ---> 2667
1884 ---> 2668
1886 ---> 2669
1903 ---> 2670
1908 ---> 2671
1913 ---> 2672
1926 ---> 2673
1931 ---> 2674
1947 ---> 2675
1950 ---> 2676
1960 ---> 2677
1961 ---> 2678
1964 ---> 2679
1973 ---> 2680
1974 ---> 2681
1975 ---> 2682
1977 ---> 2683
1984 ---> 2684
1987 ---> 2685
1993 ---> 2686
1994 ---> 2687
2007 ---> 2688
2014 ---> 2689
2016 ---> 2690
2025 ---> 2691
2031 ---> 2692
2033 ---> 2693
2050 ---> 2694
2054 ---> 2695
2070 ---> 2696
2076 ---> 2697
2081 ---> 2698
2082 ---> 2699
2096 ---> 2700
2113 ---> 2701
2117 ---> 2702
2127 ---> 2703
2130 ---> 2704
2136 ---> 2705
2144 ---> 2706
2155 ---> 2707
2156 ---> 2708
2157 ---> 2709
2161 ---> 2710
2169 ---> 2711
2176 ---> 2712
2188 ---> 2713
2189 ---> 2714
2190 ---> 2715
2191 ---> 2716
2192 ---> 2717
2193 ---> 2718
2194 ---> 2719
2195 ---> 2720
2196 ---> 2721
2197 ---> 2722
2198 ---> 2723
2199 ---> 2724
2200 ---> 2725
2201 ---> 2726
2208 ---> 2727
2210 ---> 2728
2214 ---> 2729
2216 ---> 2730
2222 ---> 2731
2223 ---> 2732
2224 ---> 2733
2225 ---> 2734
2228 ---> 2735
2230 ---> 2736
2231 ---> 2737
2232 ---> 2738
2233 ---> 2739
2238 ---> 2740
2245 ---> 2741
2246 ---> 2742
2247 ---> 2743
2252 ---> 2744
2257 ---> 2745
2260 ---> 2746
2261 ---> 2747
2262 ---> 2748
2263 ---> 2749
2265 ---> 2750
2266 ---> 2751
2267 ---> 2752
2268 ---> 2753
2270 ---> 2754
2273 ---> 2755
2279 ---> 2756
2280 ---> 2757
2281 ---> 2758
2283 ---> 2759
2284 ---> 2760
2294 ---> 2761
2301 ---> 2762
2302 ---> 2763
2303 ---> 2764
2306 ---> 2765
2307 ---> 2766
2313 ---> 2767
2319 ---> 2768
2322 ---> 2769
2327 ---> 2770
2334 ---> 2771
2335 ---> 2772
2336 ---> 2773
2338 ---> 2774
2339 ---> 2775
2341 ---> 2776
2344 ---> 2777
2347 ---> 2778
2349 ---> 2779
2358 ---> 2780
2360 ---> 2781
2376 ---> 2782
2377 ---> 2783
2380 ---> 2784
2398 ---> 2785
2405 ---> 2786
2407 ---> 2787
2412 ---> 2788
2413 ---> 2789
2432 ---> 2790
2435 ---> 2791
2437 ---> 2792
2438 ---> 2793
2443 ---> 2794
2449 ---> 2795
2451 ---> 2796
2473 ---> 2797
2491 ---> 2798
2496 ---> 2799
2497 ---> 2800
2502 ---> 2801
2506 ---> 2802
2512 ---> 2803
2513 ---> 2804
2516 ---> 2805
2519 ---> 2806
2523 ---> 2807
2531 ---> 2808
2533 ---> 2809
2559 ---> 2810
2562 ---> 2811
2563 ---> 2812
2570 ---> 2813
2586 ---> 2814
2588 ---> 2815
2597 ---> 2816
2601 ---> 2817
2606 ---> 2818
2611 ---> 2819
2613 ---> 2820
2617 ---> 2821
2624 ---> 2822
2647 ---> 2823
2649 ---> 2824
2655 ---> 2825
2657 ---> 2826
2658 ---> 2827
2661 ---> 2828
2663 ---> 2829
2682 ---> 2830
2690 ---> 2831
2697 ---> 2832
2707 ---> 2833
2709 ---> 2834
2715 ---> 2835
2716 ---> 2836
2719 ---> 2837
2743 ---> 2838
2759 ---> 2839
2765 ---> 2840
2768 ---> 2841
2794 ---> 2842
2806 ---> 2843
2808 ---> 2844
2811 ---> 2845
2817 ---> 2846
2819 ---> 2847
2820 ---> 2848
2828 ---> 2849
2845 ---> 2850
2852 ---> 2851
2855 ---> 2852
2862 ---> 2853
2868 ---> 2854
2875 ---> 2855
2876 ---> 2856
2892 ---> 2857
2897 ---> 2858
2898 ---> 2859
2943 ---> 2860
2947 ---> 2861
2954 ---> 2862
2962 ---> 2863
2963 ---> 2864
2969 ---> 2865
2981 ---> 2866
2983 ---> 2867
2986 ---> 2868
2990 ---> 2869
3008 ---> 2870
3012 ---> 2871
3013 ---> 2872
3014 ---> 2873
3015 ---> 2874
3023 ---> 2875
3030 ---> 2876
3044 ---> 2877
3049 ---> 2878
3054 ---> 2879
3061 ---> 2880
3067 ---> 2881
3072 ---> 2882
3073 ---> 2883
3077 ---> 2884
3078 ---> 2885
3079 ---> 2886
3081 ---> 2887
3085 ---> 2888
3092 ---> 2889
3094 ---> 2890
3095 ---> 2891
3100 ---> 2892
3107 ---> 2893
3111 ---> 2894
3117 ---> 2895
3122 ---> 2896
3123 ---> 2897
3126 ---> 2898
3127 ---> 2899
3128 ---> 2900
3131 ---> 2901
3133 ---> 2902
3138 ---> 2903
3150 ---> 2904
3153 ---> 2905
3155 ---> 2906
3174 ---> 2907
3175 ---> 2908
3176 ---> 2909
3177 ---> 2910
3185 ---> 2911
3189 ---> 2912
3190 ---> 2913
3192 ---> 2914
3193 ---> 2915
3213 ---> 2916
3220 ---> 2917
3226 ---> 2918
3227 ---> 2919
3232 ---> 2920
3233 ---> 2921
3243 ---> 2922
3260 ---> 2923
3264 ---> 2924
3265 ---> 2925
3269 ---> 2926
3275 ---> 2927
3276 ---> 2928
3278 ---> 2929
3279 ---> 2930
3280 ---> 2931
3281 ---> 2932
3297 ---> 2933
3301 ---> 2934
3303 ---> 2935
3308 ---> 2936
3311 ---> 2937
3314 ---> 2938
3315 ---> 2939
3323 ---> 2940
3325 ---> 2941
3332 ---> 2942
3333 ---> 2943
3344 ---> 2944
3354 ---> 2945
3358 ---> 2946
3360 ---> 2947
3361 ---> 2948
3362 ---> 2949
3369 ---> 2950
3373 ---> 2951
3377 ---> 2952
3381 ---> 2953
3391 ---> 2954
3396 ---> 2955
3397 ---> 2956
3399 ---> 2957
3400 ---> 2958
3407 ---> 2959
3410 ---> 2960
3418 ---> 2961
3427 ---> 2962
3428 ---> 2963
3429 ---> 2964
3430 ---> 2965
3446 ---> 2967
3447 ---> 2968
3455 ---> 2969
3457 ---> 2970
3458 ---> 2971
3461 ---> 2972
3462 ---> 2973
3468 ---> 2974
3469 ---> 2975
3470 ---> 2976
3474 ---> 2977
3476 ---> 2978
3489 ---> 2979
3490 ---> 2980
3494 ---> 2981
3499 ---> 2982
3502 ---> 2983
3503 ---> 2984
3506 ---> 2985
3507 ---> 2986
3510 ---> 2987
3511 ---> 2988
3516 ---> 2989
3521 ---> 2990
3527 ---> 2991
3528 ---> 2992
3531 ---> 2993
3546 ---> 2994
3548 ---> 2995
3556 ---> 2996
3561 ---> 2997
3566 ---> 2998
3577 ---> 2999
3579 ---> 3000
3580 ---> 3001
3584 ---> 3002
3592 ---> 3003
3593 ---> 3004
3597 ---> 3005
3598 ---> 3006
3603 ---> 3007
3606 ---> 3008
3607 ---> 3010
3609 ---> 3011
3610 ---> 3012
3613 ---> 3013
3614 ---> 3014
3615 ---> 3015
3623 ---> 3016
3625 ---> 3017
3626 ---> 3018
3627 ---> 3019
3628 ---> 3020
3629 ---> 3021
3630 ---> 3022
3631 ---> 3023
3632 ---> 3024
3633 ---> 3025
3634 ---> 3026
3636 ---> 3027
3638 ---> 3028
3642 ---> 3029
3647 ---> 3030
3649 ---> 3031
3663 ---> 3032
3668 ---> 3033
3671 ---> 3034
3673 ---> 3035
3680 ---> 3037
3684 ---> 3038
3690 ---> 3039
3693 ---> 3040
3701 ---> 3041
3702 ---> 3042
3703 ---> 3043
3706 ---> 3044
3709 ---> 3045
3712 ---> 3046
3718 ---> 3047
3720 ---> 3048
3723 ---> 3049
3726 ---> 3050
3734 ---> 3051
3742 ---> 3052
3751 ---> 3053
3754 ---> 3054
3756 ---> 3055
3757 ---> 3056
3758 ---> 3057
3762 ---> 3058
3764 ---> 3059
3765 ---> 3060
3768 ---> 3061
3770 ---> 3062
3773 ---> 3063
3777 ---> 3064
3791 ---> 3065
3792 ---> 3066
3793 ---> 3067
3799 ---> 3068
3802 ---> 3069
3805 ---> 3070
3807 ---> 3071
3814 ---> 3072
3815 ---> 3073
3816 ---> 3074
3817 ---> 3075
3818 ---> 3076
3819 ---> 3077
3822 ---> 3078
3823 ---> 3079
3830 ---> 3080
3837 ---> 3081
3839 ---> 3082
3841 ---> 3083
3843 ---> 1466
3845 ---> 3084
3850 ---> 3085
3852 ---> 3086
3856 ---> 3087
3875 ---> 3088
3877 ---> 3089
3881 ---> 3090
3884 ---> 3091
3885 ---> 3092
3889 ---> 3093
3890 ---> 3094
3896 ---> 3095
3902 ---> 3096
3907 ---> 3097
3910 ---> 3098
3911 ---> 3099
3930 ---> 3100
3931 ---> 3101
3940 ---> 3102
3944 ---> 3103
3945 ---> 3104
3949 ---> 3105
3951 ---> 3106
3953 ---> 3107
3965 ---> 3108
3972 ---> 3109
3976 ---> 3110
3977 ---> 3111
3980 ---> 3112
3987 ---> 3113
3997 ---> 3114
3998 ---> 3115
3999 ---> 3116
4000 ---> 3117
4001 ---> 3118
4003 ---> 3119
4007 ---> 3120
4008 ---> 3121
4009 ---> 3122
4011 ---> 3123
4016 ---> 3124
4022 ---> 3125
4026 ---> 3126
4027 ---> 3127
4030 ---> 3128
4034 ---> 3129
4037 ---> 3130
4038 ---> 3131
4039 ---> 3132
4045 ---> 3133
4047 ---> 3134
4049 ---> 3135
4052 ---> 3136
4053 ---> 3137
4054 ---> 3138
4055 ---> 3139
4058 ---> 3140
4068 ---> 3141
4071 ---> 3142
4078 ---> 3143
4080 ---> 3144
4081 ---> 3145
4082 ---> 3146
4083 ---> 3147
4088 ---> 3148
4097 ---> 3149
4098 ---> 3150
4099 ---> 3151
4100 ---> 3152
4101 ---> 3153
4102 ---> 3154
4109 ---> 3155
4112 ---> 3156
4113 ---> 3157
4125 ---> 3158
4126 ---> 3159
4130 ---> 3160
4134 ---> 3161
4139 ---> 3162
4141 ---> 3163
4143 ---> 3164
4144 ---> 3165
4146 ---> 3166
4148 ---> 3167
4149 ---> 3168
4152 ---> 3169
4153 ---> 3170
4154 ---> 3171
4158 ---> 3172
4159 ---> 3173
4161 ---> 3174
4169 ---> 3175
4173 ---> 3176
4175 ---> 3177
4177 ---> 3178
4178 ---> 3179
4179 ---> 3180
4181 ---> 3181
4183 ---> 3182
4185 ---> 3183
4186 ---> 3184
4187 ---> 3185
4188 ---> 3186
4192 ---> 3187
4193 ---> 3188
4194 ---> 3189
4195 ---> 3190
4197 ---> 3191
4200 ---> 3192
4201 ---> 3193
4202 ---> 3194
4207 ---> 3195
4208 ---> 3196
4209 ---> 3197
4216 ---> 3198
4217 ---> 3199
4224 ---> 3200
4228 ---> 3201
4229 ---> 3202
4230 ---> 3203
4232 ---> 3204
4233 ---> 3205
4236 ---> 3206
4245 ---> 3207
4247 ---> 3208
4248 ---> 3209
4249 ---> 3210
4256 ---> 3211
4262 ---> 3212
4289 ---> 3213
4291 ---> 3214
4293 ---> 3215
4294 ---> 3216
4295 ---> 3217
4296 ---> 3218
4298 ---> 3219
4299 ---> 3220
4303 ---> 3221
4304 ---> 3222
4308 ---> 3223
4309 ---> 3224
4311 ---> 3225
4312 ---> 3226
4313 ---> 3227
4314 ---> 3228
4315 ---> 3229
4317 ---> 3230
4318 ---> 3231
4319 ---> 3232
4320 ---> 3233
4322 ---> 3234
4324 ---> 3235
4326 ---> 3236
4328 ---> 3237
4330 ---> 3238
4334 ---> 3239
4343 ---> 3240
4344 ---> 3241
4350 ---> 3242
4352 ---> 3243
4361 ---> 3244
4362 ---> 3245
4365 ---> 3246
4366 ---> 3247
4368 ---> 3248
4370 ---> 3249
4372 ---> 3250
4375 ---> 3251
4376 ---> 3252
4378 ---> 3253
4379 ---> 3254
4384 ---> 3255
4386 ---> 3256
4388 ---> 3257
4390 ---> 3258
4391 ---> 3259
4408 ---> 3260
4413 ---> 3261
4417 ---> 3262
4419 ---> 3263
4422 ---> 3264
4425 ---> 3265
4426 ---> 3266
4427 ---> 3267
4428 ---> 3268
4435 ---> 3269
4446 ---> 3270
4453 ---> 3271
4455 ---> 3272
4459 ---> 3273
4460 ---> 3274
4472 ---> 3275
4473 ---> 3276
4475 ---> 3277
4481 ---> 3278
4484 ---> 3279
4492 ---> 3280
4494 ---> 3281
4496 ---> 3282
4497 ---> 3283
4506 ---> 3284
4511 ---> 3285
4512 ---> 3286
4517 ---> 3287
4525 ---> 3288
4529 ---> 3289
4530 ---> 3290
4533 ---> 3291
4537 ---> 3292
4541 ---> 3293
4543 ---> 3294
4556 ---> 3295
4558 ---> 3296
4559 ---> 3297
4560 ---> 3298
4561 ---> 3299
4565 ---> 3300
4573 ---> 3301
4578 ---> 3302
4579 ---> 3303
4587 ---> 3304
4593 ---> 3305
4596 ---> 3306
4622 ---> 3307
4636 ---> 3308
4637 ---> 3309
4638 ---> 3310
4639 ---> 3311
4640 ---> 3312
4641 ---> 3313
4642 ---> 3314
4646 ---> 3315
4649 ---> 3316
4654 ---> 3317
4657 ---> 3318
4664 ---> 3319
4697 ---> 3320
4698 ---> 3321
4703 ---> 3322
4707 ---> 3323
4709 ---> 3324
4710 ---> 3325
4716 ---> 3326
4722 ---> 3327
4724 ---> 3328
4725 ---> 3329
4727 ---> 3330
4729 ---> 3331
4730 ---> 3332
4731 ---> 3333
4732 ---> 3334
4742 ---> 3335
4744 ---> 3336
4745 ---> 3337
4747 ---> 3338
4752 ---> 3339
4755 ---> 3340
4768 ---> 3341
4769 ---> 3342
4770 ---> 3343
4771 ---> 3344
4772 ---> 3345
4773 ---> 3346
4774 ---> 3347
4775 ---> 3348
4776 ---> 3349
4777 ---> 3350
4778 ---> 3351
4779 ---> 3352
4780 ---> 3353
4781 ---> 3354
4782 ---> 3355
4783 ---> 3356
4784 ---> 3357
4785 ---> 3358
4786 ---> 3359
4787 ---> 3360
4788 ---> 3361
4789 ---> 3362
4795 ---> 3363
4797 ---> 3364
4802 ---> 3365
4809 ---> 3366
4810 ---> 3367
4811 ---> 3368
4814 ---> 3369
4819 ---> 3370
4823 ---> 3371
4828 ---> 3372
4830 ---> 3373
4833 ---> 3374
4836 ---> 3375
4840 ---> 3376
4843 ---> 3377
4849 ---> 3378
4854 ---> 3379
4858 ---> 3380
4859 ---> 3381
4865 ---> 3382
4866 ---> 3383
4868 ---> 3384
4873 ---> 3385
4879 ---> 3386
4880 ---> 3387
4884 ---> 3388
4890 ---> 3389
4894 ---> 3390
4900 ---> 3391
4903 ---> 3392
4904 ---> 3393
4915 ---> 3394
4921 ---> 3395
4923 ---> 3396
4935 ---> 3397
4937 ---> 3398
4943 ---> 3399
4944 ---> 3400
4945 ---> 3401
4947 ---> 3402
4949 ---> 3403
4951 ---> 3404
4952 ---> 3405
4957 ---> 3406
4958 ---> 3407
4962 ---> 3408
4963 ---> 3409
4969 ---> 3410
4973 ---> 3411
4975 ---> 3412
4978 ---> 3413
4986 ---> 3414
4987 ---> 3415
4997 ---> 3416
5002 ---> 3417
5019 ---> 3418
5020 ---> 3419
5021 ---> 3420
5028 ---> 3421
5034 ---> 3422
5040 ---> 3423
5044 ---> 3424
5045 ---> 3425
5047 ---> 3426
5048 ---> 3427
5051 ---> 3428
5052 ---> 3429
5053 ---> 3430
5056 ---> 3431
5058 ---> 3432
5061 ---> 3433
5062 ---> 3434
5064 ---> 3435
5066 ---> 3436
5067 ---> 3437
5068 ---> 3438
5073 ---> 3439
5074 ---> 3440
5076 ---> 3441
5082 ---> 3442
5083 ---> 3443
5099 ---> 3444
5101 ---> 3445
5102 ---> 3446
5105 ---> 3447
5106 ---> 3448
5107 ---> 3449
5108 ---> 3450
5112 ---> 3451
5113 ---> 3452
5120 ---> 3453
5130 ---> 3454
5132 ---> 3455
5135 ---> 3456
5174 ---> 3457
5175 ---> 3458
5176 ---> 3459
5179 ---> 3460
5180 ---> 3461
5181 ---> 3462
5182 ---> 3463
5186 ---> 3464
5197 ---> 3465
5199 ---> 3466
5201 ---> 3467
5205 ---> 3468
5206 ---> 3469
5213 ---> 3470
5219 ---> 3471
5221 ---> 3472
5224 ---> 3473
5225 ---> 3474
5226 ---> 3475
5234 ---> 3476
5235 ---> 3477
5244 ---> 3478
5253 ---> 3479
5254 ---> 3480
5262 ---> 3481
5297 ---> 3482
5303 ---> 3483
5317 ---> 3484
5320 ---> 3485
5323 ---> 3486
5329 ---> 3487
5337 ---> 3488
5341 ---> 3489
5366 ---> 3490
5368 ---> 3491
5379 ---> 3492
5396 ---> 3493
5399 ---> 3494
5427 ---> 3495
5435 ---> 3496
5441 ---> 3497
5446 ---> 3498
5447 ---> 3499
5449 ---> 3500
5454 ---> 3501
5456 ---> 3502
5458 ---> 3503
5481 ---> 3504
5483 ---> 3506
5485 ---> 3507
5487 ---> 3508
5489 ---> 3509
5490 ---> 3510
5494 ---> 3511
5511 ---> 3512
5518 ---> 3513
5529 ---> 3514
5530 ---> 3515
5551 ---> 3516
5553 ---> 3517
5556 ---> 3518
5562 ---> 3519
5572 ---> 3520
5580 ---> 3521
5588 ---> 3522
5628 ---> 3523
5643 ---> 3524
5645 ---> 3525
5661 ---> 3526
5662 ---> 3527
5663 ---> 3528
5665 ---> 3529
5666 ---> 3530
5675 ---> 3531
5694 ---> 3532
5716 ---> 3533
5728 ---> 3534
5731 ---> 3535
5732 ---> 3536
5739 ---> 3537
5750 ---> 3538
5753 ---> 3539
5759 ---> 3540
5780 ---> 3541
5790 ---> 3542
5800 ---> 3543
5812 ---> 3544
5813 ---> 3545
5817 ---> 3546
5820 ---> 3547
5832 ---> 3548
5852 ---> 3549
5870 ---> 3550
5872 ---> 3551
5874 ---> 3552
5903 ---> 3553
5932 ---> 3554
5940 ---> 3555
5941 ---> 3556
5946 ---> 3557
5969 ---> 3558
5975 ---> 3559
5996 ---> 3560
6050 ---> 3561
6051 ---> 3562
6084 ---> 3563
6100 ---> 3564
6114 ---> 3565
6116 ---> 3566
6117 ---> 3567
6175 ---> 3568
6189 ---> 3569
6197 ---> 3570
6199 ---> 3571
6210 ---> 3572
6212 ---> 3573
6222 ---> 3574
6236 ---> 3575
6262 ---> 3576
6269 ---> 3577
6286 ---> 3578
6337 ---> 3579
6338 ---> 3580
6341 ---> 3581
6345 ---> 3582
6354 ---> 3583
6355 ---> 3585
6361 ---> 3586
6370 ---> 3587
6380 ---> 3588
6381 ---> 3589
6384 ---> 3590
6388 ---> 3591
6396 ---> 3592
6409 ---> 3593
6412 ---> 3594
6420 ---> 3595
6436 ---> 3596
6507 ---> 3597
6508 ---> 3598
6509 ---> 3599
6511 ---> 3600
6516 ---> 3601
6518 ---> 3602
6521 ---> 3603
6532 ---> 3604
6550 ---> 3605
6552 ---> 3606
6587 ---> 3607
6588 ---> 3608
6589 ---> 3609
6591 ---> 3610
6592 ---> 3611
6593 ---> 3612
6594 ---> 3613
6595 ---> 3614
6607 ---> 3615
6610 ---> 3616
6635 ---> 3617
6651 ---> 3618
6662 ---> 3619
6669 ---> 3620
6678 ---> 3621
6679 ---> 3622
6687 ---> 3623
6688 ---> 3624
6694 ---> 3625
6696 ---> 3626
6711 ---> 3627
6713 ---> 3628
6727 ---> 3629
6728 ---> 3630
6730 ---> 3631
6762 ---> 3632
6764 ---> 3633
6768 ---> 3634
6782 ---> 3635
6786 ---> 3636
6793 ---> 3637
6810 ---> 3638
6813 ---> 3639
6825 ---> 3640
6826 ---> 3641
6832 ---> 3642
6838 ---> 3643
6839 ---> 3644
6858 ---> 3645
6865 ---> 3646
6877 ---> 3647
6881 ---> 3648
6883 ---> 3649
6893 ---> 3650
6901 ---> 3651
6908 ---> 3652
6912 ---> 3653
6914 ---> 3654
6922 ---> 3655
6934 ---> 3656
6938 ---> 1382
6942 ---> 3657
6958 ---> 3658
6960 ---> 3659
6961 ---> 3660
6966 ---> 3661
6970 ---> 3662
6972 ---> 3663
6991 ---> 3664
7018 ---> 3665
7031 ---> 3666
7039 ---> 3667
7046 ---> 3668
7062 ---> 3669
7063 ---> 3670
7065 ---> 3671
7070 ---> 3672
7112 ---> 3673
7114 ---> 3674
7118 ---> 3675
7123 ---> 3676
7128 ---> 3677
7137 ---> 3678
7147 ---> 3679
7148 ---> 3680
7149 ---> 3681
7150 ---> 3682
7173 ---> 3683
7212 ---> 3684
7218 ---> 3685
7234 ---> 3686
7250 ---> 3687
7253 ---> 3688
7254 ---> 3689
7269 ---> 3690
7274 ---> 3691
7290 ---> 3692
7317 ---> 3693
7327 ---> 3694
7340 ---> 3695
7343 ---> 3696
7344 ---> 3697
7348 ---> 3698
7368 ---> 3699
7377 ---> 3700
7396 ---> 3701
7412 ---> 3702
7470 ---> 3703
7491 ---> 3704
7516 ---> 3705
7531 ---> 3706
7532 ---> 3707
7534 ---> 3708
7539 ---> 3709
7546 ---> 3710
7550 ---> 3711
7557 ---> 3712
7567 ---> 3713
7572 ---> 3714
7575 ---> 3715
7585 ---> 3716
7597 ---> 3717
7609 ---> 3718
7610 ---> 3719
7629 ---> 3720
7635 ---> 3721
7651 ---> 3722
7653 ---> 3723
7659 ---> 3724
7691 ---> 3725
7693 ---> 3726
7694 ---> 3727
7698 ---> 3728
7700 ---> 3729
7728 ---> 3730
7730 ---> 3731
7732 ---> 3732
7737 ---> 3733
7738 ---> 3734
7746 ---> 3735
7763 ---> 3736
7772 ---> 3737
7778 ---> 3738
7794 ---> 3739
7800 ---> 3740
7809 ---> 3741
7817 ---> 3742
7818 ---> 3743
7819 ---> 3744
7825 ---> 3745
7846 ---> 3746
7848 ---> 3747
7849 ---> 3748
7851 ---> 3749
7860 ---> 831
7870 ---> 3750
7872 ---> 3751
7913 ---> 3752
7933 ---> 3753
7940 ---> 3754
7954 ---> 3755
8024 ---> 3756
8030 ---> 3757
8100 ---> 3758
8112 ---> 3759
8125 ---> 30813
8170 ---> 3760
8222 ---> 3761
8233 ---> 3762
8235 ---> 1375
8244 ---> 3763
8252 ---> 3764
8254 ---> 3765
8275 ---> 3766
8276 ---> 3767
8277 ---> 3768
8278 ---> 3769
8285 ---> 3770
8297 ---> 3771
8298 ---> 3772
8299 ---> 3773
8305 ---> 3774
8306 ---> 3775
8307 ---> 3776
8310 ---> 3777
8314 ---> 3778
8347 ---> 3779
8354 ---> 3780
8356 ---> 3781
8363 ---> 3782
8368 ---> 3783
8379 ---> 3784
8398 ---> 3785
8404 ---> 3786
8422 ---> 3787
8424 ---> 3788
8427 ---> 3789
8438 ---> 3790
8461 ---> 3791
8466 ---> 3792
8488 ---> 3793
8490 ---> 3794
8495 ---> 3795
8496 ---> 3796
8498 ---> 3797
8504 ---> 3798
8505 ---> 3799
8507 ---> 3800
8511 ---> 3801
8514 ---> 3802
8521 ---> 3803
8522 ---> 3804
8525 ---> 3805
8529 ---> 3806
8538 ---> 3807
8539 ---> 2402
8546 ---> 3808
8559 ---> 3809
8570 ---> 3810
8573 ---> 3811
8588 ---> 3812
8589 ---> 3813
8597 ---> 3814
8601 ---> 3815
8620 ---> 3816
8626 ---> 3817
8643 ---> 2320
8669 ---> 2400
8681 ---> 3818
8682 ---> 3819
8683 ---> 3820
8686 ---> 906
8691 ---> 3821
8692 ---> 3822
8694 ---> 3823
8698 ---> 3824
8703 ---> 3825
8734 ---> 3826
8735 ---> 3827
8763 ---> 3828
8764 ---> 3829
8766 ---> 3830
8774 ---> 3831
8776 ---> 3832
8779 ---> 3833
8805 ---> 3834
8829 ---> 3835
8830 ---> 3836
8832 ---> 3837
8850 ---> 3838
8852 ---> 3839
8853 ---> 1362
8860 ---> 3840
8861 ---> 3841
8862 ---> 3842
8863 ---> 3843
8865 ---> 3844
8871 ---> 3845
8882 ---> 3846
8901 ---> 3847
8902 ---> 3848
8903 ---> 3849
8913 ---> 641
8926 ---> 640
8927 ---> 639
8932 ---> 3850
8933 ---> 3851
8938 ---> 638
8941 ---> 3852
8942 ---> 3853
8944 ---> 1325
8951 ---> 3854
8986 ---> 2000
|
https://github.com/dotnet/core-setup/issues/XXXX ---> https://github.com/dotnet/runtime/issues/YYYY
18 ---> 2412
20 ---> 2413
23 ---> 2414
25 ---> 2415
26 ---> 2416
27 ---> 2417
33 ---> 2418
34 ---> 2419
37 ---> 2420
40 ---> 2421
41 ---> 2422
42 ---> 2423
71 ---> 2424
72 ---> 2425
80 ---> 2426
84 ---> 2427
89 ---> 2428
94 ---> 2429
102 ---> 2430
108 ---> 2431
110 ---> 2432
118 ---> 2433
119 ---> 2434
121 ---> 2435
150 ---> 2436
154 ---> 2437
158 ---> 2438
161 ---> 2439
165 ---> 2440
167 ---> 2441
169 ---> 2442
172 ---> 2443
174 ---> 2444
176 ---> 2445
177 ---> 2446
181 ---> 2447
183 ---> 2448
185 ---> 2449
186 ---> 2450
189 ---> 2451
191 ---> 2452
193 ---> 2453
194 ---> 2454
196 ---> 2455
198 ---> 2456
200 ---> 2457
204 ---> 2458
211 ---> 2459
215 ---> 2460
216 ---> 2461
217 ---> 2462
223 ---> 2463
225 ---> 2464
228 ---> 2465
229 ---> 2466
230 ---> 2467
233 ---> 2468
234 ---> 2469
238 ---> 2470
253 ---> 2471
256 ---> 2472
259 ---> 2473
274 ---> 2475
275 ---> 2476
276 ---> 2477
279 ---> 2478
283 ---> 2479
298 ---> 2480
300 ---> 2481
306 ---> 2482
325 ---> 2483
327 ---> 2484
347 ---> 2485
361 ---> 2486
364 ---> 2487
378 ---> 2488
385 ---> 2489
390 ---> 2490
392 ---> 2491
400 ---> 2492
402 ---> 2493
409 ---> 2494
421 ---> 2495
422 ---> 2496
423 ---> 2497
432 ---> 2498
436 ---> 2499
438 ---> 2500
444 ---> 2501
455 ---> 2502
456 ---> 2503
458 ---> 2504
459 ---> 2505
461 ---> 2506
467 ---> 2507
472 ---> 2508
499 ---> 2509
510 ---> 2510
514 ---> 2511
515 ---> 2512
538 ---> 2513
544 ---> 2514
545 ---> 2515
565 ---> 2516
570 ---> 2517
572 ---> 2518
579 ---> 2519
595 ---> 2520
617 ---> 2521
618 ---> 2522
619 ---> 2523
620 ---> 2524
622 ---> 2525
623 ---> 2526
624 ---> 2527
625 ---> 2528
626 ---> 2529
627 ---> 2530
628 ---> 2531
634 ---> 2532
635 ---> 2533
636 ---> 2534
637 ---> 2535
638 ---> 2536
650 ---> 2537
651 ---> 2538
675 ---> 2539
676 ---> 2540
679 ---> 2541
680 ---> 2542
685 ---> 2543
695 ---> 2544
715 ---> 2545
721 ---> 2546
723 ---> 2547
725 ---> 2548
729 ---> 2550
734 ---> 2551
736 ---> 2552
753 ---> 2553
764 ---> 2554
789 ---> 2555
790 ---> 2556
801 ---> 2557
811 ---> 2558
813 ---> 2559
847 ---> 2560
849 ---> 2561
934 ---> 2562
949 ---> 2563
1017 ---> 2564
1021 ---> 2565
1030 ---> 2566
1121 ---> 2567
1126 ---> 2568
1183 ---> 2569
1192 ---> 2570
1217 ---> 2571
1219 ---> 2572
1221 ---> 2573
1229 ---> 2574
1230 ---> 2575
1231 ---> 2576
1267 ---> 2577
1285 ---> 2578
1288 ---> 2579
1322 ---> 2580
1378 ---> 2581
1386 ---> 2582
1400 ---> 2583
1408 ---> 2584
1411 ---> 2585
1432 ---> 2586
1454 ---> 2587
1461 ---> 2588
1484 ---> 2589
1489 ---> 2590
1490 ---> 2591
1503 ---> 2592
1504 ---> 2593
1507 ---> 2594
1519 ---> 2595
1530 ---> 2401
1532 ---> 2596
1548 ---> 2597
1550 ---> 2598
1551 ---> 2599
1553 ---> 2600
1555 ---> 2601
1557 ---> 2602
1559 ---> 2603
1561 ---> 2604
1562 ---> 2605
1564 ---> 2606
1565 ---> 2607
1566 ---> 2608
1570 ---> 2609
1573 ---> 2610
1574 ---> 2611
1575 ---> 2612
1578 ---> 2613
1581 ---> 2614
1589 ---> 2615
1590 ---> 2616
1591 ---> 2617
1593 ---> 2618
1596 ---> 2619
1597 ---> 2620
1598 ---> 2621
1599 ---> 2622
1605 ---> 2623
1607 ---> 2624
1609 ---> 2625
1614 ---> 2626
1620 ---> 2627
1622 ---> 2628
1632 ---> 2629
1636 ---> 2630
1638 ---> 2631
1645 ---> 2632
1646 ---> 2633
1651 ---> 2044
1653 ---> 2634
1664 ---> 2635
1673 ---> 2636
1674 ---> 2637
1707 ---> 2638
1719 ---> 2639
1721 ---> 2640
1722 ---> 2641
1725 ---> 2642
1739 ---> 2643
1740 ---> 2644
1741 ---> 2645
1749 ---> 2646
1755 ---> 2647
1756 ---> 2648
1757 ---> 2649
1758 ---> 2650
1760 ---> 2651
1762 ---> 2652
1801 ---> 2653
1805 ---> 2654
1810 ---> 2655
1816 ---> 2656
1824 ---> 2657
1825 ---> 2658
1832 ---> 2659
1835 ---> 2660
1840 ---> 2661
1842 ---> 2662
1846 ---> 2663
1848 ---> 2664
1856 ---> 2665
1868 ---> 2666
1871 ---> 2667
1884 ---> 2668
1886 ---> 2669
1903 ---> 2670
1908 ---> 2671
1913 ---> 2672
1926 ---> 2673
1931 ---> 2674
1947 ---> 2675
1950 ---> 2676
1960 ---> 2677
1961 ---> 2678
1964 ---> 2679
1973 ---> 2680
1974 ---> 2681
1975 ---> 2682
1977 ---> 2683
1984 ---> 2684
1987 ---> 2685
1993 ---> 2686
1994 ---> 2687
2007 ---> 2688
2014 ---> 2689
2016 ---> 2690
2025 ---> 2691
2031 ---> 2692
2033 ---> 2693
2050 ---> 2694
2054 ---> 2695
2070 ---> 2696
2076 ---> 2697
2081 ---> 2698
2082 ---> 2699
2096 ---> 2700
2113 ---> 2701
2117 ---> 2702
2127 ---> 2703
2130 ---> 2704
2136 ---> 2705
2144 ---> 2706
2155 ---> 2707
2156 ---> 2708
2157 ---> 2709
2161 ---> 2710
2169 ---> 2711
2176 ---> 2712
2188 ---> 2713
2189 ---> 2714
2190 ---> 2715
2191 ---> 2716
2192 ---> 2717
2193 ---> 2718
2194 ---> 2719
2195 ---> 2720
2196 ---> 2721
2197 ---> 2722
2198 ---> 2723
2199 ---> 2724
2200 ---> 2725
2201 ---> 2726
2208 ---> 2727
2210 ---> 2728
2214 ---> 2729
2216 ---> 2730
2222 ---> 2731
2223 ---> 2732
2224 ---> 2733
2225 ---> 2734
2228 ---> 2735
2230 ---> 2736
2231 ---> 2737
2232 ---> 2738
2233 ---> 2739
2238 ---> 2740
2245 ---> 2741
2246 ---> 2742
2247 ---> 2743
2252 ---> 2744
2257 ---> 2745
2260 ---> 2746
2261 ---> 2747
2262 ---> 2748
2263 ---> 2749
2265 ---> 2750
2266 ---> 2751
2267 ---> 2752
2268 ---> 2753
2270 ---> 2754
2273 ---> 2755
2279 ---> 2756
2280 ---> 2757
2281 ---> 2758
2283 ---> 2759
2284 ---> 2760
2294 ---> 2761
2301 ---> 2762
2302 ---> 2763
2303 ---> 2764
2306 ---> 2765
2307 ---> 2766
2313 ---> 2767
2319 ---> 2768
2322 ---> 2769
2327 ---> 2770
2334 ---> 2771
2335 ---> 2772
2336 ---> 2773
2338 ---> 2774
2339 ---> 2775
2341 ---> 2776
2344 ---> 2777
2347 ---> 2778
2349 ---> 2779
2358 ---> 2780
2360 ---> 2781
2376 ---> 2782
2377 ---> 2783
2380 ---> 2784
2398 ---> 2785
2405 ---> 2786
2407 ---> 2787
2412 ---> 2788
2413 ---> 2789
2432 ---> 2790
2435 ---> 2791
2437 ---> 2792
2438 ---> 2793
2443 ---> 2794
2449 ---> 2795
2451 ---> 2796
2473 ---> 2797
2491 ---> 2798
2496 ---> 2799
2497 ---> 2800
2502 ---> 2801
2506 ---> 2802
2512 ---> 2803
2513 ---> 2804
2516 ---> 2805
2519 ---> 2806
2523 ---> 2807
2531 ---> 2808
2533 ---> 2809
2559 ---> 2810
2562 ---> 2811
2563 ---> 2812
2570 ---> 2813
2586 ---> 2814
2588 ---> 2815
2597 ---> 2816
2601 ---> 2817
2606 ---> 2818
2611 ---> 2819
2613 ---> 2820
2617 ---> 2821
2624 ---> 2822
2647 ---> 2823
2649 ---> 2824
2655 ---> 2825
2657 ---> 2826
2658 ---> 2827
2661 ---> 2828
2663 ---> 2829
2682 ---> 2830
2690 ---> 2831
2697 ---> 2832
2707 ---> 2833
2709 ---> 2834
2715 ---> 2835
2716 ---> 2836
2719 ---> 2837
2743 ---> 2838
2759 ---> 2839
2765 ---> 2840
2768 ---> 2841
2794 ---> 2842
2806 ---> 2843
2808 ---> 2844
2811 ---> 2845
2817 ---> 2846
2819 ---> 2847
2820 ---> 2848
2828 ---> 2849
2845 ---> 2850
2852 ---> 2851
2855 ---> 2852
2862 ---> 2853
2868 ---> 2854
2875 ---> 2855
2876 ---> 2856
2892 ---> 2857
2897 ---> 2858
2898 ---> 2859
2943 ---> 2860
2947 ---> 2861
2954 ---> 2862
2962 ---> 2863
2963 ---> 2864
2969 ---> 2865
2981 ---> 2866
2983 ---> 2867
2986 ---> 2868
2990 ---> 2869
3008 ---> 2870
3012 ---> 2871
3013 ---> 2872
3014 ---> 2873
3015 ---> 2874
3023 ---> 2875
3030 ---> 2876
3044 ---> 2877
3049 ---> 2878
3054 ---> 2879
3061 ---> 2880
3067 ---> 2881
3072 ---> 2882
3073 ---> 2883
3077 ---> 2884
3078 ---> 2885
3079 ---> 2886
3081 ---> 2887
3085 ---> 2888
3092 ---> 2889
3094 ---> 2890
3095 ---> 2891
3100 ---> 2892
3107 ---> 2893
3111 ---> 2894
3117 ---> 2895
3122 ---> 2896
3123 ---> 2897
3126 ---> 2898
3127 ---> 2899
3128 ---> 2900
3131 ---> 2901
3133 ---> 2902
3138 ---> 2903
3150 ---> 2904
3153 ---> 2905
3155 ---> 2906
3174 ---> 2907
3175 ---> 2908
3176 ---> 2909
3177 ---> 2910
3185 ---> 2911
3189 ---> 2912
3190 ---> 2913
3192 ---> 2914
3193 ---> 2915
3213 ---> 2916
3220 ---> 2917
3226 ---> 2918
3227 ---> 2919
3232 ---> 2920
3233 ---> 2921
3243 ---> 2922
3260 ---> 2923
3264 ---> 2924
3265 ---> 2925
3269 ---> 2926
3275 ---> 2927
3276 ---> 2928
3278 ---> 2929
3279 ---> 2930
3280 ---> 2931
3281 ---> 2932
3297 ---> 2933
3301 ---> 2934
3303 ---> 2935
3308 ---> 2936
3311 ---> 2937
3314 ---> 2938
3315 ---> 2939
3323 ---> 2940
3325 ---> 2941
3332 ---> 2942
3333 ---> 2943
3344 ---> 2944
3354 ---> 2945
3358 ---> 2946
3360 ---> 2947
3361 ---> 2948
3362 ---> 2949
3369 ---> 2950
3373 ---> 2951
3377 ---> 2952
3381 ---> 2953
3391 ---> 2954
3396 ---> 2955
3397 ---> 2956
3399 ---> 2957
3400 ---> 2958
3407 ---> 2959
3410 ---> 2960
3418 ---> 2961
3427 ---> 2962
3428 ---> 2963
3429 ---> 2964
3430 ---> 2965
3446 ---> 2967
3447 ---> 2968
3455 ---> 2969
3457 ---> 2970
3458 ---> 2971
3461 ---> 2972
3462 ---> 2973
3468 ---> 2974
3469 ---> 2975
3470 ---> 2976
3474 ---> 2977
3476 ---> 2978
3489 ---> 2979
3490 ---> 2980
3494 ---> 2981
3499 ---> 2982
3502 ---> 2983
3503 ---> 2984
3506 ---> 2985
3507 ---> 2986
3510 ---> 2987
3511 ---> 2988
3516 ---> 2989
3521 ---> 2990
3527 ---> 2991
3528 ---> 2992
3531 ---> 2993
3546 ---> 2994
3548 ---> 2995
3556 ---> 2996
3561 ---> 2997
3566 ---> 2998
3577 ---> 2999
3579 ---> 3000
3580 ---> 3001
3584 ---> 3002
3592 ---> 3003
3593 ---> 3004
3597 ---> 3005
3598 ---> 3006
3603 ---> 3007
3606 ---> 3008
3607 ---> 3010
3609 ---> 3011
3610 ---> 3012
3613 ---> 3013
3614 ---> 3014
3615 ---> 3015
3623 ---> 3016
3625 ---> 3017
3626 ---> 3018
3627 ---> 3019
3628 ---> 3020
3629 ---> 3021
3630 ---> 3022
3631 ---> 3023
3632 ---> 3024
3633 ---> 3025
3634 ---> 3026
3636 ---> 3027
3638 ---> 3028
3642 ---> 3029
3647 ---> 3030
3649 ---> 3031
3663 ---> 3032
3668 ---> 3033
3671 ---> 3034
3673 ---> 3035
3680 ---> 3037
3684 ---> 3038
3690 ---> 3039
3693 ---> 3040
3701 ---> 3041
3702 ---> 3042
3703 ---> 3043
3706 ---> 3044
3709 ---> 3045
3712 ---> 3046
3718 ---> 3047
3720 ---> 3048
3723 ---> 3049
3726 ---> 3050
3734 ---> 3051
3742 ---> 3052
3751 ---> 3053
3754 ---> 3054
3756 ---> 3055
3757 ---> 3056
3758 ---> 3057
3762 ---> 3058
3764 ---> 3059
3765 ---> 3060
3768 ---> 3061
3770 ---> 3062
3773 ---> 3063
3777 ---> 3064
3791 ---> 3065
3792 ---> 3066
3793 ---> 3067
3799 ---> 3068
3802 ---> 3069
3805 ---> 3070
3807 ---> 3071
3814 ---> 3072
3815 ---> 3073
3816 ---> 3074
3817 ---> 3075
3818 ---> 3076
3819 ---> 3077
3822 ---> 3078
3823 ---> 3079
3830 ---> 3080
3837 ---> 3081
3839 ---> 3082
3841 ---> 3083
3843 ---> 1466
3845 ---> 3084
3850 ---> 3085
3852 ---> 3086
3856 ---> 3087
3875 ---> 3088
3877 ---> 3089
3881 ---> 3090
3884 ---> 3091
3885 ---> 3092
3889 ---> 3093
3890 ---> 3094
3896 ---> 3095
3902 ---> 3096
3907 ---> 3097
3910 ---> 3098
3911 ---> 3099
3930 ---> 3100
3931 ---> 3101
3940 ---> 3102
3944 ---> 3103
3945 ---> 3104
3949 ---> 3105
3951 ---> 3106
3953 ---> 3107
3965 ---> 3108
3972 ---> 3109
3976 ---> 3110
3977 ---> 3111
3980 ---> 3112
3987 ---> 3113
3997 ---> 3114
3998 ---> 3115
3999 ---> 3116
4000 ---> 3117
4001 ---> 3118
4003 ---> 3119
4007 ---> 3120
4008 ---> 3121
4009 ---> 3122
4011 ---> 3123
4016 ---> 3124
4022 ---> 3125
4026 ---> 3126
4027 ---> 3127
4030 ---> 3128
4034 ---> 3129
4037 ---> 3130
4038 ---> 3131
4039 ---> 3132
4045 ---> 3133
4047 ---> 3134
4049 ---> 3135
4052 ---> 3136
4053 ---> 3137
4054 ---> 3138
4055 ---> 3139
4058 ---> 3140
4068 ---> 3141
4071 ---> 3142
4078 ---> 3143
4080 ---> 3144
4081 ---> 3145
4082 ---> 3146
4083 ---> 3147
4088 ---> 3148
4097 ---> 3149
4098 ---> 3150
4099 ---> 3151
4100 ---> 3152
4101 ---> 3153
4102 ---> 3154
4109 ---> 3155
4112 ---> 3156
4113 ---> 3157
4125 ---> 3158
4126 ---> 3159
4130 ---> 3160
4134 ---> 3161
4139 ---> 3162
4141 ---> 3163
4143 ---> 3164
4144 ---> 3165
4146 ---> 3166
4148 ---> 3167
4149 ---> 3168
4152 ---> 3169
4153 ---> 3170
4154 ---> 3171
4158 ---> 3172
4159 ---> 3173
4161 ---> 3174
4169 ---> 3175
4173 ---> 3176
4175 ---> 3177
4177 ---> 3178
4178 ---> 3179
4179 ---> 3180
4181 ---> 3181
4183 ---> 3182
4185 ---> 3183
4186 ---> 3184
4187 ---> 3185
4188 ---> 3186
4192 ---> 3187
4193 ---> 3188
4194 ---> 3189
4195 ---> 3190
4197 ---> 3191
4200 ---> 3192
4201 ---> 3193
4202 ---> 3194
4207 ---> 3195
4208 ---> 3196
4209 ---> 3197
4216 ---> 3198
4217 ---> 3199
4224 ---> 3200
4228 ---> 3201
4229 ---> 3202
4230 ---> 3203
4232 ---> 3204
4233 ---> 3205
4236 ---> 3206
4245 ---> 3207
4247 ---> 3208
4248 ---> 3209
4249 ---> 3210
4256 ---> 3211
4262 ---> 3212
4289 ---> 3213
4291 ---> 3214
4293 ---> 3215
4294 ---> 3216
4295 ---> 3217
4296 ---> 3218
4298 ---> 3219
4299 ---> 3220
4303 ---> 3221
4304 ---> 3222
4308 ---> 3223
4309 ---> 3224
4311 ---> 3225
4312 ---> 3226
4313 ---> 3227
4314 ---> 3228
4315 ---> 3229
4317 ---> 3230
4318 ---> 3231
4319 ---> 3232
4320 ---> 3233
4322 ---> 3234
4324 ---> 3235
4326 ---> 3236
4328 ---> 3237
4330 ---> 3238
4334 ---> 3239
4343 ---> 3240
4344 ---> 3241
4350 ---> 3242
4352 ---> 3243
4361 ---> 3244
4362 ---> 3245
4365 ---> 3246
4366 ---> 3247
4368 ---> 3248
4370 ---> 3249
4372 ---> 3250
4375 ---> 3251
4376 ---> 3252
4378 ---> 3253
4379 ---> 3254
4384 ---> 3255
4386 ---> 3256
4388 ---> 3257
4390 ---> 3258
4391 ---> 3259
4408 ---> 3260
4413 ---> 3261
4417 ---> 3262
4419 ---> 3263
4422 ---> 3264
4425 ---> 3265
4426 ---> 3266
4427 ---> 3267
4428 ---> 3268
4435 ---> 3269
4446 ---> 3270
4453 ---> 3271
4455 ---> 3272
4459 ---> 3273
4460 ---> 3274
4472 ---> 3275
4473 ---> 3276
4475 ---> 3277
4481 ---> 3278
4484 ---> 3279
4492 ---> 3280
4494 ---> 3281
4496 ---> 3282
4497 ---> 3283
4506 ---> 3284
4511 ---> 3285
4512 ---> 3286
4517 ---> 3287
4525 ---> 3288
4529 ---> 3289
4530 ---> 3290
4533 ---> 3291
4537 ---> 3292
4541 ---> 3293
4543 ---> 3294
4556 ---> 3295
4558 ---> 3296
4559 ---> 3297
4560 ---> 3298
4561 ---> 3299
4565 ---> 3300
4573 ---> 3301
4578 ---> 3302
4579 ---> 3303
4587 ---> 3304
4593 ---> 3305
4596 ---> 3306
4622 ---> 3307
4636 ---> 3308
4637 ---> 3309
4638 ---> 3310
4639 ---> 3311
4640 ---> 3312
4641 ---> 3313
4642 ---> 3314
4646 ---> 3315
4649 ---> 3316
4654 ---> 3317
4657 ---> 3318
4664 ---> 3319
4697 ---> 3320
4698 ---> 3321
4703 ---> 3322
4707 ---> 3323
4709 ---> 3324
4710 ---> 3325
4716 ---> 3326
4722 ---> 3327
4724 ---> 3328
4725 ---> 3329
4727 ---> 3330
4729 ---> 3331
4730 ---> 3332
4731 ---> 3333
4732 ---> 3334
4742 ---> 3335
4744 ---> 3336
4745 ---> 3337
4747 ---> 3338
4752 ---> 3339
4755 ---> 3340
4768 ---> 3341
4769 ---> 3342
4770 ---> 3343
4771 ---> 3344
4772 ---> 3345
4773 ---> 3346
4774 ---> 3347
4775 ---> 3348
4776 ---> 3349
4777 ---> 3350
4778 ---> 3351
4779 ---> 3352
4780 ---> 3353
4781 ---> 3354
4782 ---> 3355
4783 ---> 3356
4784 ---> 3357
4785 ---> 3358
4786 ---> 3359
4787 ---> 3360
4788 ---> 3361
4789 ---> 3362
4795 ---> 3363
4797 ---> 3364
4802 ---> 3365
4809 ---> 3366
4810 ---> 3367
4811 ---> 3368
4814 ---> 3369
4819 ---> 3370
4823 ---> 3371
4828 ---> 3372
4830 ---> 3373
4833 ---> 3374
4836 ---> 3375
4840 ---> 3376
4843 ---> 3377
4849 ---> 3378
4854 ---> 3379
4858 ---> 3380
4859 ---> 3381
4865 ---> 3382
4866 ---> 3383
4868 ---> 3384
4873 ---> 3385
4879 ---> 3386
4880 ---> 3387
4884 ---> 3388
4890 ---> 3389
4894 ---> 3390
4900 ---> 3391
4903 ---> 3392
4904 ---> 3393
4915 ---> 3394
4921 ---> 3395
4923 ---> 3396
4935 ---> 3397
4937 ---> 3398
4943 ---> 3399
4944 ---> 3400
4945 ---> 3401
4947 ---> 3402
4949 ---> 3403
4951 ---> 3404
4952 ---> 3405
4957 ---> 3406
4958 ---> 3407
4962 ---> 3408
4963 ---> 3409
4969 ---> 3410
4973 ---> 3411
4975 ---> 3412
4978 ---> 3413
4986 ---> 3414
4987 ---> 3415
4997 ---> 3416
5002 ---> 3417
5019 ---> 3418
5020 ---> 3419
5021 ---> 3420
5028 ---> 3421
5034 ---> 3422
5040 ---> 3423
5044 ---> 3424
5045 ---> 3425
5047 ---> 3426
5048 ---> 3427
5051 ---> 3428
5052 ---> 3429
5053 ---> 3430
5056 ---> 3431
5058 ---> 3432
5061 ---> 3433
5062 ---> 3434
5064 ---> 3435
5066 ---> 3436
5067 ---> 3437
5068 ---> 3438
5073 ---> 3439
5074 ---> 3440
5076 ---> 3441
5082 ---> 3442
5083 ---> 3443
5099 ---> 3444
5101 ---> 3445
5102 ---> 3446
5105 ---> 3447
5106 ---> 3448
5107 ---> 3449
5108 ---> 3450
5112 ---> 3451
5113 ---> 3452
5120 ---> 3453
5130 ---> 3454
5132 ---> 3455
5135 ---> 3456
5174 ---> 3457
5175 ---> 3458
5176 ---> 3459
5179 ---> 3460
5180 ---> 3461
5181 ---> 3462
5182 ---> 3463
5186 ---> 3464
5197 ---> 3465
5199 ---> 3466
5201 ---> 3467
5205 ---> 3468
5206 ---> 3469
5213 ---> 3470
5219 ---> 3471
5221 ---> 3472
5224 ---> 3473
5225 ---> 3474
5226 ---> 3475
5234 ---> 3476
5235 ---> 3477
5244 ---> 3478
5253 ---> 3479
5254 ---> 3480
5262 ---> 3481
5297 ---> 3482
5303 ---> 3483
5317 ---> 3484
5320 ---> 3485
5323 ---> 3486
5329 ---> 3487
5337 ---> 3488
5341 ---> 3489
5366 ---> 3490
5368 ---> 3491
5379 ---> 3492
5396 ---> 3493
5399 ---> 3494
5427 ---> 3495
5435 ---> 3496
5441 ---> 3497
5446 ---> 3498
5447 ---> 3499
5449 ---> 3500
5454 ---> 3501
5456 ---> 3502
5458 ---> 3503
5481 ---> 3504
5483 ---> 3506
5485 ---> 3507
5487 ---> 3508
5489 ---> 3509
5490 ---> 3510
5494 ---> 3511
5511 ---> 3512
5518 ---> 3513
5529 ---> 3514
5530 ---> 3515
5551 ---> 3516
5553 ---> 3517
5556 ---> 3518
5562 ---> 3519
5572 ---> 3520
5580 ---> 3521
5588 ---> 3522
5628 ---> 3523
5643 ---> 3524
5645 ---> 3525
5661 ---> 3526
5662 ---> 3527
5663 ---> 3528
5665 ---> 3529
5666 ---> 3530
5675 ---> 3531
5694 ---> 3532
5716 ---> 3533
5728 ---> 3534
5731 ---> 3535
5732 ---> 3536
5739 ---> 3537
5750 ---> 3538
5753 ---> 3539
5759 ---> 3540
5780 ---> 3541
5790 ---> 3542
5800 ---> 3543
5812 ---> 3544
5813 ---> 3545
5817 ---> 3546
5820 ---> 3547
5832 ---> 3548
5852 ---> 3549
5870 ---> 3550
5872 ---> 3551
5874 ---> 3552
5903 ---> 3553
5932 ---> 3554
5940 ---> 3555
5941 ---> 3556
5946 ---> 3557
5969 ---> 3558
5975 ---> 3559
5996 ---> 3560
6050 ---> 3561
6051 ---> 3562
6084 ---> 3563
6100 ---> 3564
6114 ---> 3565
6116 ---> 3566
6117 ---> 3567
6175 ---> 3568
6189 ---> 3569
6197 ---> 3570
6199 ---> 3571
6210 ---> 3572
6212 ---> 3573
6222 ---> 3574
6236 ---> 3575
6262 ---> 3576
6269 ---> 3577
6286 ---> 3578
6337 ---> 3579
6338 ---> 3580
6341 ---> 3581
6345 ---> 3582
6354 ---> 3583
6355 ---> 3585
6361 ---> 3586
6370 ---> 3587
6380 ---> 3588
6381 ---> 3589
6384 ---> 3590
6388 ---> 3591
6396 ---> 3592
6409 ---> 3593
6412 ---> 3594
6420 ---> 3595
6436 ---> 3596
6507 ---> 3597
6508 ---> 3598
6509 ---> 3599
6511 ---> 3600
6516 ---> 3601
6518 ---> 3602
6521 ---> 3603
6532 ---> 3604
6550 ---> 3605
6552 ---> 3606
6587 ---> 3607
6588 ---> 3608
6589 ---> 3609
6591 ---> 3610
6592 ---> 3611
6593 ---> 3612
6594 ---> 3613
6595 ---> 3614
6607 ---> 3615
6610 ---> 3616
6635 ---> 3617
6651 ---> 3618
6662 ---> 3619
6669 ---> 3620
6678 ---> 3621
6679 ---> 3622
6687 ---> 3623
6688 ---> 3624
6694 ---> 3625
6696 ---> 3626
6711 ---> 3627
6713 ---> 3628
6727 ---> 3629
6728 ---> 3630
6730 ---> 3631
6762 ---> 3632
6764 ---> 3633
6768 ---> 3634
6782 ---> 3635
6786 ---> 3636
6793 ---> 3637
6810 ---> 3638
6813 ---> 3639
6825 ---> 3640
6826 ---> 3641
6832 ---> 3642
6838 ---> 3643
6839 ---> 3644
6858 ---> 3645
6865 ---> 3646
6877 ---> 3647
6881 ---> 3648
6883 ---> 3649
6893 ---> 3650
6901 ---> 3651
6908 ---> 3652
6912 ---> 3653
6914 ---> 3654
6922 ---> 3655
6934 ---> 3656
6938 ---> 1382
6942 ---> 3657
6958 ---> 3658
6960 ---> 3659
6961 ---> 3660
6966 ---> 3661
6970 ---> 3662
6972 ---> 3663
6991 ---> 3664
7018 ---> 3665
7031 ---> 3666
7039 ---> 3667
7046 ---> 3668
7062 ---> 3669
7063 ---> 3670
7065 ---> 3671
7070 ---> 3672
7112 ---> 3673
7114 ---> 3674
7118 ---> 3675
7123 ---> 3676
7128 ---> 3677
7137 ---> 3678
7147 ---> 3679
7148 ---> 3680
7149 ---> 3681
7150 ---> 3682
7173 ---> 3683
7212 ---> 3684
7218 ---> 3685
7234 ---> 3686
7250 ---> 3687
7253 ---> 3688
7254 ---> 3689
7269 ---> 3690
7274 ---> 3691
7290 ---> 3692
7317 ---> 3693
7327 ---> 3694
7340 ---> 3695
7343 ---> 3696
7344 ---> 3697
7348 ---> 3698
7368 ---> 3699
7377 ---> 3700
7396 ---> 3701
7412 ---> 3702
7470 ---> 3703
7491 ---> 3704
7516 ---> 3705
7531 ---> 3706
7532 ---> 3707
7534 ---> 3708
7539 ---> 3709
7546 ---> 3710
7550 ---> 3711
7557 ---> 3712
7567 ---> 3713
7572 ---> 3714
7575 ---> 3715
7585 ---> 3716
7597 ---> 3717
7609 ---> 3718
7610 ---> 3719
7629 ---> 3720
7635 ---> 3721
7651 ---> 3722
7653 ---> 3723
7659 ---> 3724
7691 ---> 3725
7693 ---> 3726
7694 ---> 3727
7698 ---> 3728
7700 ---> 3729
7728 ---> 3730
7730 ---> 3731
7732 ---> 3732
7737 ---> 3733
7738 ---> 3734
7746 ---> 3735
7763 ---> 3736
7772 ---> 3737
7778 ---> 3738
7794 ---> 3739
7800 ---> 3740
7809 ---> 3741
7817 ---> 3742
7818 ---> 3743
7819 ---> 3744
7825 ---> 3745
7846 ---> 3746
7848 ---> 3747
7849 ---> 3748
7851 ---> 3749
7860 ---> 831
7870 ---> 3750
7872 ---> 3751
7913 ---> 3752
7933 ---> 3753
7940 ---> 3754
7954 ---> 3755
8024 ---> 3756
8030 ---> 3757
8100 ---> 3758
8112 ---> 3759
8125 ---> 30813
8170 ---> 3760
8222 ---> 3761
8233 ---> 3762
8235 ---> 1375
8244 ---> 3763
8252 ---> 3764
8254 ---> 3765
8275 ---> 3766
8276 ---> 3767
8277 ---> 3768
8278 ---> 3769
8285 ---> 3770
8297 ---> 3771
8298 ---> 3772
8299 ---> 3773
8305 ---> 3774
8306 ---> 3775
8307 ---> 3776
8310 ---> 3777
8314 ---> 3778
8347 ---> 3779
8354 ---> 3780
8356 ---> 3781
8363 ---> 3782
8368 ---> 3783
8379 ---> 3784
8398 ---> 3785
8404 ---> 3786
8422 ---> 3787
8424 ---> 3788
8427 ---> 3789
8438 ---> 3790
8461 ---> 3791
8466 ---> 3792
8488 ---> 3793
8490 ---> 3794
8495 ---> 3795
8496 ---> 3796
8498 ---> 3797
8504 ---> 3798
8505 ---> 3799
8507 ---> 3800
8511 ---> 3801
8514 ---> 3802
8521 ---> 3803
8522 ---> 3804
8525 ---> 3805
8529 ---> 3806
8538 ---> 3807
8539 ---> 2402
8546 ---> 3808
8559 ---> 3809
8570 ---> 3810
8573 ---> 3811
8588 ---> 3812
8589 ---> 3813
8597 ---> 3814
8601 ---> 3815
8620 ---> 3816
8626 ---> 3817
8643 ---> 2320
8669 ---> 2400
8681 ---> 3818
8682 ---> 3819
8683 ---> 3820
8686 ---> 906
8691 ---> 3821
8692 ---> 3822
8694 ---> 3823
8698 ---> 3824
8703 ---> 3825
8734 ---> 3826
8735 ---> 3827
8763 ---> 3828
8764 ---> 3829
8766 ---> 3830
8774 ---> 3831
8776 ---> 3832
8779 ---> 3833
8805 ---> 3834
8829 ---> 3835
8830 ---> 3836
8832 ---> 3837
8850 ---> 3838
8852 ---> 3839
8853 ---> 1362
8860 ---> 3840
8861 ---> 3841
8862 ---> 3842
8863 ---> 3843
8865 ---> 3844
8871 ---> 3845
8882 ---> 3846
8901 ---> 3847
8902 ---> 3848
8903 ---> 3849
8913 ---> 641
8926 ---> 640
8927 ---> 639
8932 ---> 3850
8933 ---> 3851
8938 ---> 638
8941 ---> 3852
8942 ---> 3853
8944 ---> 1325
8951 ---> 3854
8986 ---> 2000
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/Directed/StructABI/CMakeLists.txt
|
project (StructABILib)
include_directories(${INC_PLATFORM_DIR})
if(CLR_CMAKE_HOST_WIN32)
add_compile_options(/TC) # compile all files as C
else()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
endif()
# add the executable
add_library (StructABILib SHARED StructABI.c)
# add the install targets
install (TARGETS StructABILib DESTINATION bin)
|
project (StructABILib)
include_directories(${INC_PLATFORM_DIR})
if(CLR_CMAKE_HOST_WIN32)
add_compile_options(/TC) # compile all files as C
else()
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
endif()
# add the executable
add_library (StructABILib SHARED StructABI.c)
# add the install targets
install (TARGETS StructABILib DESTINATION bin)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/fft7.txt
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltApiV2/baseline/copy-of.txt
|
<out>
<title>XML Primer</title>
<title>XSLT Basics</title>
<title>Advanced XSLT</title>
</out>
|
<out>
<title>XML Primer</title>
<title>XSLT Basics</title>
<title>Advanced XSLT</title>
</out>
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/oft22.txt
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/infft11a.txt
|
@.\infft11b.txt
|
@.\infft11b.txt
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/pft9.txt
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/Interop/PrimitiveMarshalling/UIntPtr/CMakeLists.txt
|
project (UIntPtrNative)
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
set(SOURCES UIntPtrNative.cpp )
# add the executable
add_library (UIntPtrNative SHARED ${SOURCES})
target_link_libraries(UIntPtrNative ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS UIntPtrNative DESTINATION bin)
|
project (UIntPtrNative)
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
set(SOURCES UIntPtrNative.cpp )
# add the executable
add_library (UIntPtrNative SHARED ${SOURCES})
target_link_libraries(UIntPtrNative ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS UIntPtrNative DESTINATION bin)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/coreclr/nativeaot/Bootstrap/CMakeLists.txt
|
add_subdirectory(base)
add_subdirectory(dll)
|
add_subdirectory(base)
add_subdirectory(dll)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/native/corehost/test/typelibs/CMakeLists.txt
|
# Get the current list of definitions to pass to midl
get_compile_definitions(MIDL_DEFINITIONS)
get_include_directories(MIDL_INCLUDE_DIRECTORIES)
find_program(MIDL midl.exe)
function(compile_idl idl_file tlb_out)
# Compile IDL file using MIDL
set(IDL_SOURCE ${idl_file})
get_filename_component(IDL_NAME ${IDL_SOURCE} NAME_WE)
set(tlb_out_local "${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}.tlb")
set("${tlb_out}" "${tlb_out_local}" PARENT_SCOPE)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}_i.c ${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}.h ${tlb_out_local}
COMMAND ${MIDL} ${MIDL_INCLUDE_DIRECTORIES}
/h ${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}.h ${MIDL_DEFINITIONS}
/out ${CMAKE_CURRENT_BINARY_DIR}
/tlb ${tlb_out_local}
${IDL_SOURCE}
DEPENDS ${IDL_SOURCE}
COMMENT "Compiling ${IDL_SOURCE}")
endfunction()
compile_idl(${CMAKE_CURRENT_SOURCE_DIR}/Server.idl Server_tlb)
compile_idl(${CMAKE_CURRENT_SOURCE_DIR}/Nested.idl Nested_tlb)
add_custom_target(typelibs ALL DEPENDS "${Server_tlb}" "${Nested_tlb}")
install(FILES "${Server_tlb}" "${Nested_tlb}" DESTINATION corehost_test)
|
# Get the current list of definitions to pass to midl
get_compile_definitions(MIDL_DEFINITIONS)
get_include_directories(MIDL_INCLUDE_DIRECTORIES)
find_program(MIDL midl.exe)
function(compile_idl idl_file tlb_out)
# Compile IDL file using MIDL
set(IDL_SOURCE ${idl_file})
get_filename_component(IDL_NAME ${IDL_SOURCE} NAME_WE)
set(tlb_out_local "${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}.tlb")
set("${tlb_out}" "${tlb_out_local}" PARENT_SCOPE)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}_i.c ${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}.h ${tlb_out_local}
COMMAND ${MIDL} ${MIDL_INCLUDE_DIRECTORIES}
/h ${CMAKE_CURRENT_BINARY_DIR}/${IDL_NAME}.h ${MIDL_DEFINITIONS}
/out ${CMAKE_CURRENT_BINARY_DIR}
/tlb ${tlb_out_local}
${IDL_SOURCE}
DEPENDS ${IDL_SOURCE}
COMMENT "Compiling ${IDL_SOURCE}")
endfunction()
compile_idl(${CMAKE_CURRENT_SOURCE_DIR}/Server.idl Server_tlb)
compile_idl(${CMAKE_CURRENT_SOURCE_DIR}/Nested.idl Nested_tlb)
add_custom_target(typelibs ALL DEPENDS "${Server_tlb}" "${Nested_tlb}")
install(FILES "${Server_tlb}" "${Nested_tlb}" DESTINATION corehost_test)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/coreclr/pal/tests/palsuite/file_io/GetFileAttributesExW/test1/.hidden_directory/anchor.txt
|
This file is here so this directory gets checked out even with the -P
option.
|
This file is here so this directory gets checked out even with the -P
option.
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/Interop/StringMarshalling/VBByRefStr/CMakeLists.txt
|
project (VBByRefStrNative)
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
set(SOURCES VBByRefStrNative.cpp )
# add the executable
add_library (VBByRefStrNative SHARED ${SOURCES})
target_link_libraries(VBByRefStrNative ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS VBByRefStrNative DESTINATION bin)
|
project (VBByRefStrNative)
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
set(SOURCES VBByRefStrNative.cpp )
# add the executable
add_library (VBByRefStrNative SHARED ${SOURCES})
target_link_libraries(VBByRefStrNative ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS VBByRefStrNative DESTINATION bin)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/cnt11.txt
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
|
<?xml version="1.0" encoding="utf-8"?>Hello, world!
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/Interop/PrimitiveMarshalling/EnumMarshalling/CMakeLists.txt
|
project (MarshalEnumNative)
set(SOURCES MarshalEnumNative.cpp )
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
# add the executable
add_library (MarshalEnumNative SHARED ${SOURCES})
target_link_libraries(MarshalEnumNative ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS MarshalEnumNative DESTINATION bin)
|
project (MarshalEnumNative)
set(SOURCES MarshalEnumNative.cpp )
include ("${CLR_INTEROP_TEST_ROOT}/Interop.cmake")
# add the executable
add_library (MarshalEnumNative SHARED ${SOURCES})
target_link_libraries(MarshalEnumNative ${LINK_LIBRARIES_ADDITIONAL})
# add the install targets
install (TARGETS MarshalEnumNative DESTINATION bin)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Resources.ResourceManager/src/MatchingRefApiCompatBaseline.txt
|
Compat issues with assembly System.Resources.ResourceManager:
MembersMustExist : Member 'protected System.String System.String System.Resources.ResourceManager.BaseNameField' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'protected System.Resources.IResourceReader System.Resources.IResourceReader System.Resources.ResourceSet.Reader' does not exist in the reference but it does exist in the implementation.
Total Issues: 2
|
Compat issues with assembly System.Resources.ResourceManager:
MembersMustExist : Member 'protected System.String System.String System.Resources.ResourceManager.BaseNameField' does not exist in the reference but it does exist in the implementation.
MembersMustExist : Member 'protected System.Resources.IResourceReader System.Resources.IResourceReader System.Resources.ResourceSet.Reader' does not exist in the reference but it does exist in the implementation.
Total Issues: 2
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/dft20.txt
|
Microsoft (R) XSLT Compiler version 2.0.60220
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2006. All rights reserved.
fatal error : Unrecognized option: '/debug-+-'.
|
Microsoft (R) XSLT Compiler version 2.0.60220
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2006. All rights reserved.
fatal error : Unrecognized option: '/debug-+-'.
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Runtime.Serialization.Xml/tests/TrimmingTests/System.Runtime.Serialization.Xml.TrimmingTests.proj
|
<Project DefaultTargets="Build">
<Import Project="$([MSBuild]::GetPathOfFileAbove(Directory.Build.props))" />
<ItemGroup>
<TestConsoleAppSourceFiles Include="EndToEndTest.cs" />
</ItemGroup>
<Import Project="$([MSBuild]::GetPathOfFileAbove(Directory.Build.targets))" />
</Project>
|
<Project DefaultTargets="Build">
<Import Project="$([MSBuild]::GetPathOfFileAbove(Directory.Build.props))" />
<ItemGroup>
<TestConsoleAppSourceFiles Include="EndToEndTest.cs" />
</ItemGroup>
<Import Project="$([MSBuild]::GetPathOfFileAbove(Directory.Build.targets))" />
</Project>
| -1 |
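The trimming test project above only compiles EndToEndTest.cs, whose body is not included in this excerpt. As a loose illustration of the kind of DataContractSerializer round-trip such an end-to-end test typically exercises, here is a minimal hedged C# sketch; the Person type and its members are invented for this sketch and do not come from the repository.

using System;
using System.IO;
using System.Runtime.Serialization;

// Hypothetical contract type used only for this sketch.
[DataContract]
public class Person
{
    [DataMember] public string Name { get; set; }
    [DataMember] public int Age { get; set; }
}

public static class RoundTripSketch
{
    public static void Main()
    {
        var serializer = new DataContractSerializer(typeof(Person));
        using var stream = new MemoryStream();

        // Write the object as XML, rewind the stream, and read it back.
        serializer.WriteObject(stream, new Person { Name = "Ada", Age = 36 });
        stream.Position = 0;
        var copy = (Person)serializer.ReadObject(stream);

        Console.WriteLine($"{copy.Name}, {copy.Age}"); // expected: Ada, 36
    }
}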
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/coreclr/vm/eventing/eventpipe/CMakeLists.txt
|
include(FindPythonInterp)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(GENERATE_SCRIPT ${CLR_DIR}/scripts/genEventPipe.py)
set(GENERATE_COMMAND ${PYTHON_EXECUTABLE} ${GENERATE_SCRIPT} --man ${EVENT_MANIFEST} --exc ${EVENT_EXCLUSIONS} --intermediate ${CMAKE_CURRENT_BINARY_DIR} ${NONEXTERN_ARG})
execute_process(
COMMAND ${GENERATE_COMMAND} --dry-run
RESULT_VARIABLE GEN_EVENTPIPE_RESULT
OUTPUT_VARIABLE GEN_EVENTPIPE_SOURCE_PATHS
ERROR_VARIABLE GEN_EVENTPIPE_ERRORS
)
if (NOT GEN_EVENTPIPE_RESULT EQUAL 0)
message(FATAL_ERROR "Failed to generate EventPipe: ${GEN_EVENTPIPE_ERRORS}")
endif()
string(REPLACE "\n" ";" GEN_EVENTPIPE_SOURCE_PATHS ${GEN_EVENTPIPE_SOURCE_PATHS}) # turn the outputted list of files into a CMake list
set (GEN_EVENTPIPE_SOURCES "")
foreach(GEN_EVENTPIPE_SOURCE_PATH ${GEN_EVENTPIPE_SOURCE_PATHS})
file(TO_CMAKE_PATH ${GEN_EVENTPIPE_SOURCE_PATH} GEN_EVENTPIPE_SOURCE)
list(APPEND GEN_EVENTPIPE_SOURCES ${GEN_EVENTPIPE_SOURCE})
endforeach()
add_custom_command(OUTPUT ${GEN_EVENTPIPE_SOURCES}
COMMAND ${GENERATE_COMMAND}
DEPENDS ${GENERATE_SCRIPT} ${EVENT_MANIFEST} ${EVENT_EXCLUSIONS})
set (EVENTPIPE_SOURCES "")
set (EVENTPIPE_HEADERS "")
set (CORECLR_EVENTPIPE_SHIM_SOURCES "")
set (CORECLR_EVENTPIPE_SHIM_HEADERS "")
set (SHARED_EVENTPIPE_SOURCE_PATH "${CLR_SRC_NATIVE_DIR}/eventpipe")
set (CORECLR_EVENTPIPE_SHIM_SOURCE_PATH "${CORECLR_EVENTPIPE_SHIM_DIR}")
include (${SHARED_EVENTPIPE_SOURCE_PATH}/CMakeLists.txt)
list(APPEND EVENTPIPE_SOURCES
${SHARED_EVENTPIPE_SOURCES}
${SHARED_DIAGNOSTIC_SERVER_SOURCES}
)
list(APPEND EVENTPIPE_HEADERS
${SHARED_EVENTPIPE_HEADERS}
${SHARED_DIAGNOSTIC_SERVER_HEADERS}
)
addprefix(EVENTPIPE_SOURCES ${SHARED_EVENTPIPE_SOURCE_PATH} "${EVENTPIPE_SOURCES}")
addprefix(EVENTPIPE_HEADERS ${SHARED_EVENTPIPE_SOURCE_PATH} "${EVENTPIPE_HEADERS}")
set_source_files_properties(${SHARED_EVENTPIPE_SOURCE_PATH}/ep-sources.c PROPERTIES COMPILE_DEFINITIONS EP_FORCE_INCLUDE_SOURCE_FILES)
set_source_files_properties(${SHARED_EVENTPIPE_SOURCE_PATH}/ds-sources.c PROPERTIES COMPILE_DEFINITIONS DS_FORCE_INCLUDE_SOURCE_FILES)
set_source_files_properties(${EVENTPIPE_SOURCES} PROPERTIES LANGUAGE CXX)
if(CLR_CMAKE_HOST_UNIX)
if (CMAKE_VERSION VERSION_GREATER 3.11 OR CMAKE_VERSION VERSION_EQUAL 3.11)
set_source_files_properties(${EVENTPIPE_SOURCES} PROPERTIES COMPILE_OPTIONS -xc++)
else(CMAKE_VERSION VERSION_GREATER 3.11 OR CMAKE_VERSION VERSION_EQUAL 3.11)
add_compile_options(-xc++)
endif()
endif(CLR_CMAKE_HOST_UNIX)
list(APPEND CORECLR_EVENTPIPE_SHIM_SOURCES
ep-rt-coreclr.cpp
)
list(APPEND CORECLR_EVENTPIPE_SHIM_HEADERS
ds-rt-coreclr.h
ds-rt-types-coreclr.h
ep-rt-coreclr.h
ep-rt-config-coreclr.h
ep-rt-types-coreclr.h
)
addprefix(CORECLR_EVENTPIPE_SHIM_SOURCES ${CORECLR_EVENTPIPE_SHIM_SOURCE_PATH} "${CORECLR_EVENTPIPE_SHIM_SOURCES}")
addprefix(CORECLR_EVENTPIPE_SHIM_HEADERS ${CORECLR_EVENTPIPE_SHIM_SOURCE_PATH} "${CORECLR_EVENTPIPE_SHIM_HEADERS}")
list(APPEND EVENTPIPE_SOURCES
${CORECLR_EVENTPIPE_SHIM_SOURCES}
${CORECLR_EVENTPIPE_SHIM_HEADERS}
${EVENTPIPE_HEADERS}
${SHARED_EVENTPIPE_CONFIG_HEADERS}
)
add_library_clr(eventpipe_gen_objs OBJECT ${GEN_EVENTPIPE_SOURCES})
target_precompile_headers(eventpipe_gen_objs PRIVATE [["common.h"]])
set_target_properties(eventpipe_gen_objs PROPERTIES LINKER_LANGUAGE CXX)
add_dependencies(eventpipe_gen_objs eventing_headers)
add_library_clr(eventpipe_objs OBJECT ${EVENTPIPE_SOURCES})
target_precompile_headers(eventpipe_objs PRIVATE [["common.h"]])
set_target_properties(eventpipe_objs PROPERTIES LINKER_LANGUAGE CXX)
add_dependencies(eventpipe_objs eventing_headers)
add_library(eventpipe INTERFACE)
target_sources(eventpipe INTERFACE $<TARGET_OBJECTS:eventpipe_gen_objs> $<TARGET_OBJECTS:eventpipe_objs>)
|
include(FindPythonInterp)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(GENERATE_SCRIPT ${CLR_DIR}/scripts/genEventPipe.py)
set(GENERATE_COMMAND ${PYTHON_EXECUTABLE} ${GENERATE_SCRIPT} --man ${EVENT_MANIFEST} --exc ${EVENT_EXCLUSIONS} --intermediate ${CMAKE_CURRENT_BINARY_DIR} ${NONEXTERN_ARG})
execute_process(
COMMAND ${GENERATE_COMMAND} --dry-run
RESULT_VARIABLE GEN_EVENTPIPE_RESULT
OUTPUT_VARIABLE GEN_EVENTPIPE_SOURCE_PATHS
ERROR_VARIABLE GEN_EVENTPIPE_ERRORS
)
if (NOT GEN_EVENTPIPE_RESULT EQUAL 0)
message(FATAL_ERROR "Failed to generate EventPipe: ${GEN_EVENTPIPE_ERRORS}")
endif()
string(REPLACE "\n" ";" GEN_EVENTPIPE_SOURCE_PATHS ${GEN_EVENTPIPE_SOURCE_PATHS}) # turn the outputted list of files into a CMake list
set (GEN_EVENTPIPE_SOURCES "")
foreach(GEN_EVENTPIPE_SOURCE_PATH ${GEN_EVENTPIPE_SOURCE_PATHS})
file(TO_CMAKE_PATH ${GEN_EVENTPIPE_SOURCE_PATH} GEN_EVENTPIPE_SOURCE)
list(APPEND GEN_EVENTPIPE_SOURCES ${GEN_EVENTPIPE_SOURCE})
endforeach()
add_custom_command(OUTPUT ${GEN_EVENTPIPE_SOURCES}
COMMAND ${GENERATE_COMMAND}
DEPENDS ${GENERATE_SCRIPT} ${EVENT_MANIFEST} ${EVENT_EXCLUSIONS})
set (EVENTPIPE_SOURCES "")
set (EVENTPIPE_HEADERS "")
set (CORECLR_EVENTPIPE_SHIM_SOURCES "")
set (CORECLR_EVENTPIPE_SHIM_HEADERS "")
set (SHARED_EVENTPIPE_SOURCE_PATH "${CLR_SRC_NATIVE_DIR}/eventpipe")
set (CORECLR_EVENTPIPE_SHIM_SOURCE_PATH "${CORECLR_EVENTPIPE_SHIM_DIR}")
include (${SHARED_EVENTPIPE_SOURCE_PATH}/CMakeLists.txt)
list(APPEND EVENTPIPE_SOURCES
${SHARED_EVENTPIPE_SOURCES}
${SHARED_DIAGNOSTIC_SERVER_SOURCES}
)
list(APPEND EVENTPIPE_HEADERS
${SHARED_EVENTPIPE_HEADERS}
${SHARED_DIAGNOSTIC_SERVER_HEADERS}
)
addprefix(EVENTPIPE_SOURCES ${SHARED_EVENTPIPE_SOURCE_PATH} "${EVENTPIPE_SOURCES}")
addprefix(EVENTPIPE_HEADERS ${SHARED_EVENTPIPE_SOURCE_PATH} "${EVENTPIPE_HEADERS}")
set_source_files_properties(${SHARED_EVENTPIPE_SOURCE_PATH}/ep-sources.c PROPERTIES COMPILE_DEFINITIONS EP_FORCE_INCLUDE_SOURCE_FILES)
set_source_files_properties(${SHARED_EVENTPIPE_SOURCE_PATH}/ds-sources.c PROPERTIES COMPILE_DEFINITIONS DS_FORCE_INCLUDE_SOURCE_FILES)
set_source_files_properties(${EVENTPIPE_SOURCES} PROPERTIES LANGUAGE CXX)
if(CLR_CMAKE_HOST_UNIX)
if (CMAKE_VERSION VERSION_GREATER 3.11 OR CMAKE_VERSION VERSION_EQUAL 3.11)
set_source_files_properties(${EVENTPIPE_SOURCES} PROPERTIES COMPILE_OPTIONS -xc++)
else(CMAKE_VERSION VERSION_GREATER 3.11 OR CMAKE_VERSION VERSION_EQUAL 3.11)
add_compile_options(-xc++)
endif()
endif(CLR_CMAKE_HOST_UNIX)
list(APPEND CORECLR_EVENTPIPE_SHIM_SOURCES
ep-rt-coreclr.cpp
)
list(APPEND CORECLR_EVENTPIPE_SHIM_HEADERS
ds-rt-coreclr.h
ds-rt-types-coreclr.h
ep-rt-coreclr.h
ep-rt-config-coreclr.h
ep-rt-types-coreclr.h
)
addprefix(CORECLR_EVENTPIPE_SHIM_SOURCES ${CORECLR_EVENTPIPE_SHIM_SOURCE_PATH} "${CORECLR_EVENTPIPE_SHIM_SOURCES}")
addprefix(CORECLR_EVENTPIPE_SHIM_HEADERS ${CORECLR_EVENTPIPE_SHIM_SOURCE_PATH} "${CORECLR_EVENTPIPE_SHIM_HEADERS}")
list(APPEND EVENTPIPE_SOURCES
${CORECLR_EVENTPIPE_SHIM_SOURCES}
${CORECLR_EVENTPIPE_SHIM_HEADERS}
${EVENTPIPE_HEADERS}
${SHARED_EVENTPIPE_CONFIG_HEADERS}
)
add_library_clr(eventpipe_gen_objs OBJECT ${GEN_EVENTPIPE_SOURCES})
target_precompile_headers(eventpipe_gen_objs PRIVATE [["common.h"]])
set_target_properties(eventpipe_gen_objs PROPERTIES LINKER_LANGUAGE CXX)
add_dependencies(eventpipe_gen_objs eventing_headers)
add_library_clr(eventpipe_objs OBJECT ${EVENTPIPE_SOURCES})
target_precompile_headers(eventpipe_objs PRIVATE [["common.h"]])
set_target_properties(eventpipe_objs PROPERTIES LINKER_LANGUAGE CXX)
add_dependencies(eventpipe_objs eventing_headers)
add_library(eventpipe INTERFACE)
target_sources(eventpipe INTERFACE $<TARGET_OBJECTS:eventpipe_gen_objs> $<TARGET_OBJECTS:eventpipe_objs>)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/Loader/NativeLibs/CMakeLists.txt
|
project(FromNativePaths_lib)
set(CMAKE_SHARED_LIBRARY_PREFIX "")
set(SOURCES FromNativePaths_lib.cpp)
add_library(FromNativePaths_lib SHARED ${SOURCES})
install(TARGETS FromNativePaths_lib DESTINATION Loader/NativeLibs)
|
project(FromNativePaths_lib)
set(CMAKE_SHARED_LIBRARY_PREFIX "")
set(SOURCES FromNativePaths_lib.cpp)
add_library(FromNativePaths_lib SHARED ${SOURCES})
install(TARGETS FromNativePaths_lib DESTINATION Loader/NativeLibs)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/Interop/COM/NativeClients/Licensing/CMakeLists.txt
|
project (COMClientLicensing)
include_directories( ${INC_PLATFORM_DIR} )
include_directories( "../../ServerContracts" )
include_directories( "../../NativeServer" )
include_directories("../")
set(SOURCES
LicenseTests.cpp
App.manifest)
# add the executable
add_executable (COMClientLicensing ${SOURCES})
target_link_libraries(COMClientLicensing ${LINK_LIBRARIES_ADDITIONAL})
# Copy CoreShim manifest to project output
file(GENERATE OUTPUT $<TARGET_FILE_DIR:${PROJECT_NAME}>/CoreShim.X.manifest INPUT ${CMAKE_CURRENT_SOURCE_DIR}/CoreShim.X.manifest)
# add the install targets
install (TARGETS COMClientLicensing DESTINATION bin)
|
project (COMClientLicensing)
include_directories( ${INC_PLATFORM_DIR} )
include_directories( "../../ServerContracts" )
include_directories( "../../NativeServer" )
include_directories("../")
set(SOURCES
LicenseTests.cpp
App.manifest)
# add the executable
add_executable (COMClientLicensing ${SOURCES})
target_link_libraries(COMClientLicensing ${LINK_LIBRARIES_ADDITIONAL})
# Copy CoreShim manifest to project output
file(GENERATE OUTPUT $<TARGET_FILE_DIR:${PROJECT_NAME}>/CoreShim.X.manifest INPUT ${CMAKE_CURRENT_SOURCE_DIR}/CoreShim.X.manifest)
# add the install targets
install (TARGETS COMClientLicensing DESTINATION bin)
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/dft3.txt
|
Microsoft (R) XSLT Compiler version 2.0.60220
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2006. All rights reserved.
fatal error : Unrecognized option: '/debug~'.
|
Microsoft (R) XSLT Compiler version 2.0.60220
for Microsoft (R) Windows (R) 2005 Framework version 2.0.50727
Copyright (C) Microsoft Corporation 2006. All rights reserved.
fatal error : Unrecognized option: '/debug~'.
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/XsltScenarios/Schematron/v1Test4.txt
|
In pattern @Title:
The element Person must have a Title attribute
In pattern (@Title = 'Mr' and ex:Gender = 'Male') or @Title != 'Mr':
If the Title is "Mr" then the gender of the person must be "Male".
|
In pattern @Title:
The element Person must have a Title attribute
In pattern (@Title = 'Mr' and ex:Gender = 'Male') or @Title != 'Mr':
If the Title is "Mr" then the gender of the person must be "Male".
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/mono/mono/metadata/external-only.h
|
/**
* \file
* Shorthand and markers for functions only used by embedders.
* MONO_ENTER_GC_UNSAFE is also a good indication of external_only.
*
* Author:
* Jay Krell ([email protected])
*
* Copyright 2018 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef MONO_EXTERNAL_ONLY
#define MONO_EXTERNAL_ONLY_GC_UNSAFE(t, expr) \
t result; \
MONO_ENTER_GC_UNSAFE; \
result = expr; \
MONO_EXIT_GC_UNSAFE; \
return result;
#define MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID(expr) \
MONO_ENTER_GC_UNSAFE; \
expr; \
MONO_EXIT_GC_UNSAFE; \
#define MONO_EXTERNAL_ONLY(t, expr) return expr;
#define MONO_EXTERNAL_ONLY_VOID(expr) expr;
#endif /* MONO_EXTERNAL_ONLY */
|
/**
* \file
* Shorthand and markers for functions only used by embedders.
* MONO_ENTER_GC_UNSAFE is also a good indication of external_only.
*
* Author:
* Jay Krell ([email protected])
*
* Copyright 2018 Microsoft
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#ifndef MONO_EXTERNAL_ONLY
#define MONO_EXTERNAL_ONLY_GC_UNSAFE(t, expr) \
t result; \
MONO_ENTER_GC_UNSAFE; \
result = expr; \
MONO_EXIT_GC_UNSAFE; \
return result;
#define MONO_EXTERNAL_ONLY_GC_UNSAFE_VOID(expr) \
MONO_ENTER_GC_UNSAFE; \
expr; \
MONO_EXIT_GC_UNSAFE; \
#define MONO_EXTERNAL_ONLY(t, expr) return expr;
#define MONO_EXTERNAL_ONLY_VOID(expr) expr;
#endif /* MONO_EXTERNAL_ONLY */
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/HardwareIntrinsics/General/Vector64/ConvertToSingle.UInt32.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void ConvertToSingleUInt32()
{
var test = new VectorUnaryOpTest__ConvertToSingleUInt32();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorUnaryOpTest__ConvertToSingleUInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt32[] inArray1, Single[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<UInt32> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
return testStruct;
}
public void RunStructFldScenario(VectorUnaryOpTest__ConvertToSingleUInt32 testClass)
{
var result = Vector64.ConvertToSingle(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt32>>() / sizeof(UInt32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single);
private static UInt32[] _data1 = new UInt32[Op1ElementCount];
private static Vector64<UInt32> _clsVar1;
private Vector64<UInt32> _fld1;
private DataTable _dataTable;
static VectorUnaryOpTest__ConvertToSingleUInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
}
public VectorUnaryOpTest__ConvertToSingleUInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
_dataTable = new DataTable(_data1, new Single[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector64.ConvertToSingle(
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector64).GetMethod(nameof(Vector64.ConvertToSingle), new Type[] {
typeof(Vector64<UInt32>)
});
if (method is null)
{
method = typeof(Vector64).GetMethod(nameof(Vector64.ConvertToSingle), 1, new Type[] {
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Single));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector64.ConvertToSingle(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr);
var result = Vector64.ConvertToSingle(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorUnaryOpTest__ConvertToSingleUInt32();
var result = Vector64.ConvertToSingle(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector64.ConvertToSingle(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector64.ConvertToSingle(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<UInt32> op1, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(UInt32[] firstOp, Single[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (float)(firstOp[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (float)(firstOp[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.ConvertToSingle)}<Single>(Vector64<UInt32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void ConvertToSingleUInt32()
{
var test = new VectorUnaryOpTest__ConvertToSingleUInt32();
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
// Validates passing a static member works
test.RunClsVarScenario();
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
// Validates passing an instance member of a class works
test.RunClassFldScenario();
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class VectorUnaryOpTest__ConvertToSingleUInt32
{
private struct DataTable
{
private byte[] inArray1;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle outHandle;
private ulong alignment;
public DataTable(UInt32[] inArray1, Single[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt32>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Single>();
if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector64<UInt32> _fld1;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
return testStruct;
}
public void RunStructFldScenario(VectorUnaryOpTest__ConvertToSingleUInt32 testClass)
{
var result = Vector64.ConvertToSingle(_fld1);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr);
}
}
private static readonly int LargestVectorSize = 8;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<UInt32>>() / sizeof(UInt32);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Single>>() / sizeof(Single);
private static UInt32[] _data1 = new UInt32[Op1ElementCount];
private static Vector64<UInt32> _clsVar1;
private Vector64<UInt32> _fld1;
private DataTable _dataTable;
static VectorUnaryOpTest__ConvertToSingleUInt32()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _clsVar1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
}
public VectorUnaryOpTest__ConvertToSingleUInt32()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<UInt32>, byte>(ref _fld1), ref Unsafe.As<UInt32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt32(); }
_dataTable = new DataTable(_data1, new Single[RetElementCount], LargestVectorSize);
}
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = Vector64.ConvertToSingle(
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var method = typeof(Vector64).GetMethod(nameof(Vector64.ConvertToSingle), new Type[] {
typeof(Vector64<UInt32>)
});
if (method is null)
{
method = typeof(Vector64).GetMethod(nameof(Vector64.ConvertToSingle), 1, new Type[] {
typeof(Vector64<>).MakeGenericType(Type.MakeGenericMethodParameter(0))
});
}
if (method.IsGenericMethodDefinition)
{
method = method.MakeGenericMethod(typeof(Single));
}
var result = method.Invoke(null, new object[] {
Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Single>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = Vector64.ConvertToSingle(
_clsVar1
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector64<UInt32>>(_dataTable.inArray1Ptr);
var result = Vector64.ConvertToSingle(op1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new VectorUnaryOpTest__ConvertToSingleUInt32();
var result = Vector64.ConvertToSingle(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = Vector64.ConvertToSingle(_fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = Vector64.ConvertToSingle(test._fld1);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
private void ValidateResult(Vector64<UInt32> op1, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), op1);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "")
{
UInt32[] inArray1 = new UInt32[Op1ElementCount];
Single[] outArray = new Single[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<UInt32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Single, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Single>>());
ValidateResult(inArray1, outArray, method);
}
private void ValidateResult(UInt32[] firstOp, Single[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
if (result[0] != (float)(firstOp[0]))
{
succeeded = false;
}
else
{
for (var i = 1; i < RetElementCount; i++)
{
if (result[i] != (float)(firstOp[i]))
{
succeeded = false;
break;
}
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(Vector64)}.{nameof(Vector64.ConvertToSingle)}<Single>(Vector64<UInt32>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
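The generated test above feeds a Vector64<UInt32> into Vector64.ConvertToSingle through several calling scenarios and then checks every output lane against a scalar (float) cast of the matching input lane. A minimal sketch of that core check, with the DataTable/GCHandle plumbing stripped away and arbitrary sample values standing in for the random generator, might look like this:

using System;
using System.Runtime.Intrinsics;

public static class ConvertToSingleSketch
{
    public static void Main()
    {
        // Two arbitrary lanes; the real test fills the input from TestLibrary.Generator.
        Vector64<uint> input = Vector64.Create(1u, 4294967295u);

        Vector64<float> result = Vector64.ConvertToSingle(input);

        // Element-wise validation mirrors ValidateResult: each lane must equal (float)lane.
        for (int i = 0; i < Vector64<uint>.Count; i++)
        {
            if (result.GetElement(i) != (float)input.GetElement(i))
            {
                throw new Exception($"Lane {i} did not match the scalar conversion.");
            }
        }

        Console.WriteLine("ConvertToSingle matched the scalar casts for all lanes.");
    }
}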
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.ServiceModel.Syndication/tests/TestFeeds/RssFeeds/pubdate_pst.xml
|
<!--
Description: valid pubDate (PST)
Expect: ValidRFC2822Date{parent:channel,element:pubDate}
-->
<rss version="2.0">
<channel>
<title>Validity test</title>
<link>http://contoso.com/rss/2.0/</link>
<description>valid pubDate (PST)</description>
<pubDate>31 Dec 2002 14:20:20 PST</pubDate>
</channel>
</rss>
|
<!--
Description: valid pubDate (PST)
Expect: ValidRFC2822Date{parent:channel,element:pubDate}
-->
<rss version="2.0">
<channel>
<title>Validity test</title>
<link>http://contoso.com/rss/2.0/</link>
<description>valid pubDate (PST)</description>
<pubDate>31 Dec 2002 14:20:20 PST</pubDate>
</channel>
</rss>
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/Microsoft.XmlSerializer.Generator/tests/Microsoft.XmlSerializer.Generator.Tests.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<DefineConstants>$(DefineConstants);XMLSERIALIZERGENERATORTESTS</DefineConstants>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<SkipTestsOnPlatform Condition="'$(TargetsMobile)' == 'true' or '$(TargetOS)' == 'FreeBSD' or '$(TargetArchitecture)' == 'arm' or '$(TargetArchitecture)' == 'arm64' or '$(TargetArchitecture)' == 'armel' or '$(TargetArchitecture)' == 'wasm'">true</SkipTestsOnPlatform>
</PropertyGroup>
<ItemGroup Condition="'$(SkipTestsOnPlatform)' != 'true'">
<Compile Include="SGenTests.cs" />
<Compile Include="$(CommonTestPath)System\Runtime\Serialization\Utils.cs" />
<Compile Include="$(TestSourceFolder)..\..\System.Private.Xml\tests\XmlSerializer\XmlSerializerTests.cs" />
</ItemGroup>
<ItemGroup>
<Compile Include="AlwaysPassTest.cs" />
<None Include="Expected.SerializableAssembly.XmlSerializers.cs">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</None>
<Content Include="$(GeneratorRuntimeConfig)">
<!-- Rename it to match the Generator application name -->
<Link>dotnet-Microsoft.XmlSerializer.Generator.runtimeconfig.json</Link>
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
</ItemGroup>
<ItemGroup Condition=" '$(SkipTestsOnPlatform)' != 'true'">
<ProjectReference Include="..\src\Microsoft.XmlSerializer.Generator.csproj" />
<ProjectReference Include="SerializableAssembly.csproj" />
</ItemGroup>
<!-- This target runs before binplacing as it needs to provide a test assembly to binplace, and depends on CopyFilesToOutputDirectory
so that the Generator app dll and runtimeconfig will be copied to the OutputPath -->
<Target Name="GenerateSerializationAssembly" DependsOnTargets="CopyFilesToOutputDirectory" AfterTargets="PrepareForRun" Condition=" '$(SkipTestsOnPlatform)' != 'true' ">
<PropertyGroup>
<SerializerName>SerializableAssembly.XmlSerializers</SerializerName>
</PropertyGroup>
<Warning Condition="Exists('$(OutputPath)Expected.$(SerializerName).cs') != 'true'" Text="Fail to find $(OutputPath)Expected.$(SerializerName).cs" />
<Message Text="Compiling Expected Serializers" Importance="normal" />
<SetParentAssemblyId CodeFile="$(OutputPath)Expected.$(SerializerName).cs" AssemblyFile="$(OutputPath)SerializableAssembly.dll" />
<Csc Condition="Exists('$(OutputPath)Expected.$(SerializerName).cs') == 'true'" OutputAssembly="$(OutputPath)$(SerializerName).dll" References="@(ReferencePath);@(IntermediateAssembly)" EmitDebugInformation="$(DebugSymbols)" DebugType="$(DebugType)" Sources="$(OutputPath)Expected.$(SerializerName).cs" TargetType="Library" ToolExe="$(CscToolExe)" ToolPath="$(CscToolPath)" DisabledWarnings="$(NoWarn), 219" UseSharedCompilation="true" />
<Warning Condition="Exists('$(OutputPath)$(SerializerName).dll') != 'true'" Text="Fail to generate $(OutputPath)$(SerializerName).dll" />
<ItemGroup>
<!-- Include the Serializer in ReferenceCopyLocalPaths so that it will be binplaced -->
<ReferenceCopyLocalPaths Include="$(OutputPath)$(SerializerName).dll" />
</ItemGroup>
</Target>
<UsingTask TaskName="SetParentAssemblyId" TaskFactory="RoslynCodeTaskFactory" AssemblyFile="$(MSBuildBinPath)\Microsoft.Build.Tasks.Core.dll">
<ParameterGroup>
<CodeFile ParameterType="System.String" Required="true" />
<AssemblyFile ParameterType="System.String" Required="true" />
</ParameterGroup>
<Task>
<Using Namespace="System" />
<Using Namespace="System.Collections" />
<Using Namespace="System.IO" />
<Using Namespace="System.Reflection" />
<Code Type="Fragment" Language="C#"><![CDATA[
// Roughly based on System.Xml.Serialization.TempAssembly.GenerateAssemblyId()
var list = new ArrayList();
foreach (var module in Assembly.LoadFrom(AssemblyFile).GetModules())
{
list.Add(module.ModuleVersionId.ToString());
}
list.Sort();
var sb = new StringBuilder();
for (int i = 0; i < list.Count; i++)
{
sb.Append(list[i]!.ToString());
sb.Append(',');
}
string parentAssemblyId = sb.ToString();
string content = File.ReadAllText(CodeFile);
content = content.Replace("%%ParentAssemblyId%%", parentAssemblyId);
File.WriteAllText(CodeFile, content);
]]></Code>
</Task>
</UsingTask>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<DefineConstants>$(DefineConstants);XMLSERIALIZERGENERATORTESTS</DefineConstants>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<SkipTestsOnPlatform Condition="'$(TargetsMobile)' == 'true' or '$(TargetOS)' == 'FreeBSD' or '$(TargetArchitecture)' == 'arm' or '$(TargetArchitecture)' == 'arm64' or '$(TargetArchitecture)' == 'armel' or '$(TargetArchitecture)' == 'wasm'">true</SkipTestsOnPlatform>
</PropertyGroup>
<ItemGroup Condition="'$(SkipTestsOnPlatform)' != 'true'">
<Compile Include="SGenTests.cs" />
<Compile Include="$(CommonTestPath)System\Runtime\Serialization\Utils.cs" />
<Compile Include="$(TestSourceFolder)..\..\System.Private.Xml\tests\XmlSerializer\XmlSerializerTests.cs" />
</ItemGroup>
<ItemGroup>
<Compile Include="AlwaysPassTest.cs" />
<None Include="Expected.SerializableAssembly.XmlSerializers.cs">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</None>
<Content Include="$(GeneratorRuntimeConfig)">
<!-- Rename it to match the Generator application name -->
<Link>dotnet-Microsoft.XmlSerializer.Generator.runtimeconfig.json</Link>
<CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
</Content>
</ItemGroup>
<ItemGroup Condition=" '$(SkipTestsOnPlatform)' != 'true'">
<ProjectReference Include="..\src\Microsoft.XmlSerializer.Generator.csproj" />
<ProjectReference Include="SerializableAssembly.csproj" />
</ItemGroup>
<!-- This target runs before binplacing as it needs to provide a test assembly to binplace, and depends on CopyFilesToOutputDirectory
so that the Generator app dll and runtimeconfig will be copied to the OutputPath -->
<Target Name="GenerateSerializationAssembly" DependsOnTargets="CopyFilesToOutputDirectory" AfterTargets="PrepareForRun" Condition=" '$(SkipTestsOnPlatform)' != 'true' ">
<PropertyGroup>
<SerializerName>SerializableAssembly.XmlSerializers</SerializerName>
</PropertyGroup>
<Warning Condition="Exists('$(OutputPath)Expected.$(SerializerName).cs') != 'true'" Text="Fail to find $(OutputPath)Expected.$(SerializerName).cs" />
<Message Text="Compiling Expected Serializers" Importance="normal" />
<SetParentAssemblyId CodeFile="$(OutputPath)Expected.$(SerializerName).cs" AssemblyFile="$(OutputPath)SerializableAssembly.dll" />
<Csc Condition="Exists('$(OutputPath)Expected.$(SerializerName).cs') == 'true'" OutputAssembly="$(OutputPath)$(SerializerName).dll" References="@(ReferencePath);@(IntermediateAssembly)" EmitDebugInformation="$(DebugSymbols)" DebugType="$(DebugType)" Sources="$(OutputPath)Expected.$(SerializerName).cs" TargetType="Library" ToolExe="$(CscToolExe)" ToolPath="$(CscToolPath)" DisabledWarnings="$(NoWarn), 219" UseSharedCompilation="true" />
<Warning Condition="Exists('$(OutputPath)$(SerializerName).dll') != 'true'" Text="Fail to generate $(OutputPath)$(SerializerName).dll" />
<ItemGroup>
<!-- Include the Serializer in ReferenceCopyLocalPaths so that it will be binplaced -->
<ReferenceCopyLocalPaths Include="$(OutputPath)$(SerializerName).dll" />
</ItemGroup>
</Target>
<UsingTask TaskName="SetParentAssemblyId" TaskFactory="RoslynCodeTaskFactory" AssemblyFile="$(MSBuildBinPath)\Microsoft.Build.Tasks.Core.dll">
<ParameterGroup>
<CodeFile ParameterType="System.String" Required="true" />
<AssemblyFile ParameterType="System.String" Required="true" />
</ParameterGroup>
<Task>
<Using Namespace="System" />
<Using Namespace="System.Collections" />
<Using Namespace="System.IO" />
<Using Namespace="System.Reflection" />
<Code Type="Fragment" Language="C#"><![CDATA[
// Roughly based on System.Xml.Serialization.TempAssembly.GenerateAssemblyId()
var list = new ArrayList();
foreach (var module in Assembly.LoadFrom(AssemblyFile).GetModules())
{
list.Add(module.ModuleVersionId.ToString());
}
list.Sort();
var sb = new StringBuilder();
for (int i = 0; i < list.Count; i++)
{
sb.Append(list[i]!.ToString());
sb.Append(',');
}
string parentAssemblyId = sb.ToString();
string content = File.ReadAllText(CodeFile);
content = content.Replace("%%ParentAssemblyId%%", parentAssemblyId);
File.WriteAllText(CodeFile, content);
]]></Code>
</Task>
</UsingTask>
</Project>
| -1 |
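The inline SetParentAssemblyId task in the project above (hosted by RoslynCodeTaskFactory) stamps the expected serializer source with an id built from the sorted module version ids of the target assembly. The same idea, restated as a standalone hedged C# sketch with a placeholder assembly path:

using System;
using System.Collections.Generic;
using System.Reflection;
using System.Text;

public static class ParentAssemblyIdSketch
{
    // Roughly mirrors the MSBuild code fragment above, which is itself based on
    // System.Xml.Serialization.TempAssembly.GenerateAssemblyId().
    public static string ComputeParentAssemblyId(string assemblyFile)
    {
        var ids = new List<string>();
        foreach (Module module in Assembly.LoadFrom(assemblyFile).GetModules())
        {
            ids.Add(module.ModuleVersionId.ToString());
        }
        ids.Sort();

        var sb = new StringBuilder();
        foreach (string id in ids)
        {
            sb.Append(id);
            sb.Append(',');
        }
        return sb.ToString();
    }

    public static void Main()
    {
        // Placeholder path; substitute the assembly the serializers were generated for.
        Console.WriteLine(ComputeParentAssemblyId("SerializableAssembly.dll"));
    }
}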
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.ComponentModel.Annotations/tests/System/ComponentModel/DataAnnotations/ValidationContextTests.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Xunit;
namespace System.ComponentModel.DataAnnotations.Tests
{
public class ValidationContextTests
{
[Fact]
public static void Constructor_throws_if_passed_null_instance()
{
AssertExtensions.Throws<ArgumentNullException>("instance", () => new ValidationContext(null));
}
[Fact]
public static void Constructor_creates_new_instance_for_one_arg_constructor()
{
var testDataAnnotationsDerived = new TestClass();
new ValidationContext(testDataAnnotationsDerived);
}
[Fact]
public static void Constructor_creates_new_instance_for_two_arg_constructor()
{
var testDataAnnotationsDerived = new TestClass();
new ValidationContext(testDataAnnotationsDerived, null);
var items = new Dictionary<object, object>();
new ValidationContext(testDataAnnotationsDerived, items);
}
[Fact]
public static void Constructor_creates_new_instance_for_three_arg_constructor()
{
var testDataAnnotationsDerived = new TestClass();
new ValidationContext(testDataAnnotationsDerived, null, null);
var items = new Dictionary<object, object>();
new ValidationContext(testDataAnnotationsDerived, null, items);
var serviceProvider = new TestServiceProvider();
new ValidationContext(testDataAnnotationsDerived, serviceProvider, items);
}
[Fact]
public static void ObjectInstance_and_ObjectType_return_same_instance_and_type_as_passed()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
Assert.Same(testDataAnnotationsDerived, validationContext.ObjectInstance);
Assert.Equal(typeof(TestClass), validationContext.ObjectType);
}
[Fact]
public static void Items_returns_new_Dictionary_with_same_keys_and_values()
{
var testDataAnnotationsDerived = new TestClass();
var items = new Dictionary<object, object>();
items.Add("testKey1", "testValue1");
items.Add("testKey2", "testValue2");
var validationContext = new ValidationContext(testDataAnnotationsDerived, items);
Assert.NotSame(items, validationContext.Items);
Assert.Equal(2, validationContext.Items.Count);
Assert.Equal("testValue1", validationContext.Items["testKey1"]);
Assert.Equal("testValue2", validationContext.Items["testKey2"]);
}
[Fact]
public static void Can_get_and_set_MemberName_to_existent_and_non_existent_members_and_null()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.MemberName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.MemberName);
validationContext.MemberName = "NonExistentMemberName";
Assert.Equal("NonExistentMemberName", validationContext.MemberName);
validationContext.MemberName = null;
Assert.Null(validationContext.MemberName);
}
[Fact]
public static void Can_get_and_set_DisplayName_to_existent_and_non_existent_members()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.DisplayName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.DisplayName);
validationContext.DisplayName = "NonExistentDisplayName";
Assert.Equal("NonExistentDisplayName", validationContext.DisplayName);
}
[Fact]
public static void Setting_DisplayName_to_null_or_empty_throws()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.DisplayName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.DisplayName);
validationContext.DisplayName = "NonExistentDisplayName";
Assert.Equal("NonExistentDisplayName", validationContext.DisplayName);
AssertExtensions.Throws<ArgumentNullException>("value", () => validationContext.DisplayName = null);
AssertExtensions.Throws<ArgumentNullException>("value", () => validationContext.DisplayName = string.Empty);
}
// DisplayName_returns_class_name_for_unset_member_name_and_can_be_overridden()
[Fact]
public static void TestDisplayName()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
Assert.Equal("TestClass", validationContext.DisplayName);
validationContext.DisplayName = "OverriddenDisplayName";
Assert.Equal("OverriddenDisplayName", validationContext.DisplayName);
}
// DisplayName_returns_name_from_DisplayAttribute_if_set_and_can_be_overridden
[Fact]
public static void TestDisplayNameDisplayAttribute()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.MemberName = "DisplayNameMember";
Assert.Equal("DisplayNameMemberDisplayName", validationContext.DisplayName);
validationContext.DisplayName = "OverriddenDisplayName";
Assert.Equal("OverriddenDisplayName", validationContext.DisplayName);
}
// DisplayName_returns_name_of_member_if_DisplayAttribute_not_set_and_can_be_overridden
[Fact]
public static void TestDisplayNameNoDisplayAttribute()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.MemberName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.DisplayName);
validationContext.DisplayName = "OverriddenDisplayName";
Assert.Equal("OverriddenDisplayName", validationContext.DisplayName);
}
[Fact]
public void DisplayName_NoSuchMemberName_ReturnsMemberName()
{
var validationContext = new ValidationContext(new object()) { MemberName = "test" };
Assert.Equal("test", validationContext.DisplayName);
}
[Fact]
public void GetService_CustomServiceProvider_ReturnsNull()
{
var validationContext = new ValidationContext(new object());
validationContext.InitializeServiceProvider(type =>
{
Assert.Equal(typeof(int), type);
return typeof(bool);
});
Assert.Equal(typeof(bool), validationContext.GetService(typeof(int)));
}
[Fact]
public void GetService_NullServiceProvider_ReturnsNull()
{
var validationContext = new ValidationContext(new object());
Assert.Null(validationContext.GetService(typeof(int)));
}
}
public class TestClass
{
[Display(Name = "DisplayNameMemberDisplayName")]
public int DisplayNameMember { get; set; }
public int ExistingMember { get; set; }
}
public class TestServiceProvider : IServiceProvider
{
public object GetService(Type serviceType)
{
return null;
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Generic;
using Xunit;
namespace System.ComponentModel.DataAnnotations.Tests
{
public class ValidationContextTests
{
[Fact]
public static void Constructor_throws_if_passed_null_instance()
{
AssertExtensions.Throws<ArgumentNullException>("instance", () => new ValidationContext(null));
}
[Fact]
public static void Constructor_creates_new_instance_for_one_arg_constructor()
{
var testDataAnnotationsDerived = new TestClass();
new ValidationContext(testDataAnnotationsDerived);
}
[Fact]
public static void Constructor_creates_new_instance_for_two_arg_constructor()
{
var testDataAnnotationsDerived = new TestClass();
new ValidationContext(testDataAnnotationsDerived, null);
var items = new Dictionary<object, object>();
new ValidationContext(testDataAnnotationsDerived, items);
}
[Fact]
public static void Constructor_creates_new_instance_for_three_arg_constructor()
{
var testDataAnnotationsDerived = new TestClass();
new ValidationContext(testDataAnnotationsDerived, null, null);
var items = new Dictionary<object, object>();
new ValidationContext(testDataAnnotationsDerived, null, items);
var serviceProvider = new TestServiceProvider();
new ValidationContext(testDataAnnotationsDerived, serviceProvider, items);
}
[Fact]
public static void ObjectInstance_and_ObjectType_return_same_instance_and_type_as_passed()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
Assert.Same(testDataAnnotationsDerived, validationContext.ObjectInstance);
Assert.Equal(typeof(TestClass), validationContext.ObjectType);
}
[Fact]
public static void Items_returns_new_Dictionary_with_same_keys_and_values()
{
var testDataAnnotationsDerived = new TestClass();
var items = new Dictionary<object, object>();
items.Add("testKey1", "testValue1");
items.Add("testKey2", "testValue2");
var validationContext = new ValidationContext(testDataAnnotationsDerived, items);
Assert.NotSame(items, validationContext.Items);
Assert.Equal(2, validationContext.Items.Count);
Assert.Equal("testValue1", validationContext.Items["testKey1"]);
Assert.Equal("testValue2", validationContext.Items["testKey2"]);
}
[Fact]
public static void Can_get_and_set_MemberName_to_existent_and_non_existent_members_and_null()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.MemberName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.MemberName);
validationContext.MemberName = "NonExistentMemberName";
Assert.Equal("NonExistentMemberName", validationContext.MemberName);
validationContext.MemberName = null;
Assert.Null(validationContext.MemberName);
}
[Fact]
public static void Can_get_and_set_DisplayName_to_existent_and_non_existent_members()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.DisplayName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.DisplayName);
validationContext.DisplayName = "NonExistentDisplayName";
Assert.Equal("NonExistentDisplayName", validationContext.DisplayName);
}
[Fact]
public static void Setting_DisplayName_to_null_or_empty_throws()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.DisplayName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.DisplayName);
validationContext.DisplayName = "NonExistentDisplayName";
Assert.Equal("NonExistentDisplayName", validationContext.DisplayName);
AssertExtensions.Throws<ArgumentNullException>("value", () => validationContext.DisplayName = null);
AssertExtensions.Throws<ArgumentNullException>("value", () => validationContext.DisplayName = string.Empty);
}
// DisplayName_returns_class_name_for_unset_member_name_and_can_be_overridden()
[Fact]
public static void TestDisplayName()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
Assert.Equal("TestClass", validationContext.DisplayName);
validationContext.DisplayName = "OverriddenDisplayName";
Assert.Equal("OverriddenDisplayName", validationContext.DisplayName);
}
// DisplayName_returns_name_from_DisplayAttribute_if_set_and_can_be_overridden
[Fact]
public static void TestDisplayNameDisplayAttribute()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.MemberName = "DisplayNameMember";
Assert.Equal("DisplayNameMemberDisplayName", validationContext.DisplayName);
validationContext.DisplayName = "OverriddenDisplayName";
Assert.Equal("OverriddenDisplayName", validationContext.DisplayName);
}
// DisplayName_returns_name_of_member_if_DisplayAttribute_not_set_and_can_be_overridden
[Fact]
public static void TestDisplayNameNoDisplayAttribute()
{
var testDataAnnotationsDerived = new TestClass();
var validationContext = new ValidationContext(testDataAnnotationsDerived);
validationContext.MemberName = "ExistingMember";
Assert.Equal("ExistingMember", validationContext.DisplayName);
validationContext.DisplayName = "OverriddenDisplayName";
Assert.Equal("OverriddenDisplayName", validationContext.DisplayName);
}
[Fact]
public void DisplayName_NoSuchMemberName_ReturnsMemberName()
{
var validationContext = new ValidationContext(new object()) { MemberName = "test" };
Assert.Equal("test", validationContext.DisplayName);
}
[Fact]
public void GetService_CustomServiceProvider_ReturnsNull()
{
var validationContext = new ValidationContext(new object());
validationContext.InitializeServiceProvider(type =>
{
Assert.Equal(typeof(int), type);
return typeof(bool);
});
Assert.Equal(typeof(bool), validationContext.GetService(typeof(int)));
}
[Fact]
public void GetService_NullServiceProvider_ReturnsNull()
{
var validationContext = new ValidationContext(new object());
Assert.Null(validationContext.GetService(typeof(int)));
}
}
public class TestClass
{
[Display(Name = "DisplayNameMemberDisplayName")]
public int DisplayNameMember { get; set; }
public int ExistingMember { get; set; }
}
public class TestServiceProvider : IServiceProvider
{
public object GetService(Type serviceType)
{
return null;
}
}
}
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/Directed/shift/uint32_r.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType />
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="uint32.cs" />
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<OutputType>Exe</OutputType>
<CLRTestPriority>1</CLRTestPriority>
</PropertyGroup>
<PropertyGroup>
<DebugType />
<Optimize />
</PropertyGroup>
<ItemGroup>
<Compile Include="uint32.cs" />
</ItemGroup>
</Project>
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/Directed/nullabletypes/hashcode.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//<Title>Nullable types lift the GetHashCode() method from the underlying struct</Title>
//<Description>
// A nullable type with a value returns the GetHashCode() from the underlying struct
//</Description>
#pragma warning disable 0649
using System;
interface BaseInter { }
interface GenInter<T> { }
struct Struct { }
struct ImplStruct : BaseInter { }
struct OpenGenImplStruct<T> : GenInter<T> { }
struct CloseGenImplStruct : GenInter<int> { }
class Foo { }
class NullableTest1
{
static int? i = new int?(1);
static Struct? s = new Struct?(new Struct());
static ImplStruct? imps = new ImplStruct?(new ImplStruct());
static OpenGenImplStruct<Foo>? genfoo = new OpenGenImplStruct<Foo>?(new OpenGenImplStruct<Foo>());
static CloseGenImplStruct? genint = new CloseGenImplStruct?(new CloseGenImplStruct());
public static void Run()
{
Test_nullabletypes.Eval(i.GetHashCode(), 1.GetHashCode());
Test_nullabletypes.Eval(s.GetHashCode(), default(Struct).GetHashCode());
Test_nullabletypes.Eval(imps.GetHashCode(), default(ImplStruct).GetHashCode());
Test_nullabletypes.Eval(genfoo.GetHashCode(), default(OpenGenImplStruct<Foo>).GetHashCode());
Test_nullabletypes.Eval(genint.GetHashCode(), default(CloseGenImplStruct).GetHashCode());
}
}
class NullableTest2
{
static int? i;
static Struct? s;
static ImplStruct? imps;
static OpenGenImplStruct<Foo>? genfoo;
static CloseGenImplStruct? genint;
public static void Run()
{
Test_nullabletypes.Eval(i.GetHashCode(), 0);
Test_nullabletypes.Eval(s.GetHashCode(), 0);
Test_nullabletypes.Eval(imps.GetHashCode(), 0);
Test_nullabletypes.Eval(genfoo.GetHashCode(), 0);
Test_nullabletypes.Eval(genint.GetHashCode(), 0);
}
}
public class NullableTests
{
public static void Run()
{
NullableTest1.Run();
NullableTest2.Run();
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//<Title>Nullable types lift the GetHashCode() method from the underlying struct</Title>
//<Description>
// A nullable type with a value returns the GetHashCode() from the underlying struct
//</Description>
#pragma warning disable 0649
using System;
interface BaseInter { }
interface GenInter<T> { }
struct Struct { }
struct ImplStruct : BaseInter { }
struct OpenGenImplStruct<T> : GenInter<T> { }
struct CloseGenImplStruct : GenInter<int> { }
class Foo { }
class NullableTest1
{
static int? i = new int?(1);
static Struct? s = new Struct?(new Struct());
static ImplStruct? imps = new ImplStruct?(new ImplStruct());
static OpenGenImplStruct<Foo>? genfoo = new OpenGenImplStruct<Foo>?(new OpenGenImplStruct<Foo>());
static CloseGenImplStruct? genint = new CloseGenImplStruct?(new CloseGenImplStruct());
public static void Run()
{
Test_nullabletypes.Eval(i.GetHashCode(), 1.GetHashCode());
Test_nullabletypes.Eval(s.GetHashCode(), default(Struct).GetHashCode());
Test_nullabletypes.Eval(imps.GetHashCode(), default(ImplStruct).GetHashCode());
Test_nullabletypes.Eval(genfoo.GetHashCode(), default(OpenGenImplStruct<Foo>).GetHashCode());
Test_nullabletypes.Eval(genint.GetHashCode(), default(CloseGenImplStruct).GetHashCode());
}
}
class NullableTest2
{
static int? i;
static Struct? s;
static ImplStruct? imps;
static OpenGenImplStruct<Foo>? genfoo;
static CloseGenImplStruct? genint;
public static void Run()
{
Test_nullabletypes.Eval(i.GetHashCode(), 0);
Test_nullabletypes.Eval(s.GetHashCode(), 0);
Test_nullabletypes.Eval(imps.GetHashCode(), 0);
Test_nullabletypes.Eval(genfoo.GetHashCode(), 0);
Test_nullabletypes.Eval(genint.GetHashCode(), 0);
}
}
public class NullableTests
{
public static void Run()
{
NullableTest1.Run();
NullableTest2.Run();
}
}
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/AbsoluteDifferenceWideningLowerAndAdd.Vector64.Int16.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16()
{
var test = new SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int16[] inArray2, Int16[] inArray3, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int16, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector64<Int16> _fld2;
public Vector64<Int16> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16 testClass)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16 testClass)
{
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector64<Int16>* pFld2 = &_fld2)
fixed (Vector64<Int16>* pFld3 = &_fld3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector64((Int16*)(pFld2)),
AdvSimd.LoadVector64((Int16*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Int16[] _data3 = new Int16[Op3ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector64<Int16> _clsVar2;
private static Vector64<Int16> _clsVar3;
private Vector128<Int32> _fld1;
private Vector64<Int16> _fld2;
private Vector64<Int16> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
}
public SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceWideningLowerAndAdd), new Type[] { typeof(Vector128<Int32>), typeof(Vector64<Int16>), typeof(Vector64<Int16>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceWideningLowerAndAdd), new Type[] { typeof(Vector128<Int32>), typeof(Vector64<Int16>), typeof(Vector64<Int16>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector64<Int16>* pClsVar2 = &_clsVar2)
fixed (Vector64<Int16>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pClsVar1)),
AdvSimd.LoadVector64((Int16*)(pClsVar2)),
AdvSimd.LoadVector64((Int16*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr);
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16();
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16();
fixed (Vector128<Int32>* pFld1 = &test._fld1)
fixed (Vector64<Int16>* pFld2 = &test._fld2)
fixed (Vector64<Int16>* pFld3 = &test._fld3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector64((Int16*)(pFld2)),
AdvSimd.LoadVector64((Int16*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector64<Int16>* pFld2 = &_fld2)
fixed (Vector64<Int16>* pFld3 = &_fld3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector64((Int16*)(pFld2)),
AdvSimd.LoadVector64((Int16*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(&test._fld1)),
AdvSimd.LoadVector64((Int16*)(&test._fld2)),
AdvSimd.LoadVector64((Int16*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> op1, Vector64<Int16> op2, Vector64<Int16> op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] inArray3 = new Int16[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] inArray3 = new Int16[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int32[] firstOp, Int16[] secondOp, Int16[] thirdOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.AbsoluteDifferenceWideningAndAdd(firstOp[i], secondOp[i], thirdOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AbsoluteDifferenceWideningLowerAndAdd)}<Int32>(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;
namespace JIT.HardwareIntrinsics.Arm
{
public static partial class Program
{
private static void AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16()
{
var test = new SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16();
if (test.IsSupported)
{
// Validates basic functionality works, using Unsafe.Read
test.RunBasicScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates basic functionality works, using Load
test.RunBasicScenario_Load();
}
// Validates calling via reflection works, using Unsafe.Read
test.RunReflectionScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates calling via reflection works, using Load
test.RunReflectionScenario_Load();
}
// Validates passing a static member works
test.RunClsVarScenario();
if (AdvSimd.IsSupported)
{
// Validates passing a static member works, using pinning and Load
test.RunClsVarScenario_Load();
}
// Validates passing a local works, using Unsafe.Read
test.RunLclVarScenario_UnsafeRead();
if (AdvSimd.IsSupported)
{
// Validates passing a local works, using Load
test.RunLclVarScenario_Load();
}
// Validates passing the field of a local class works
test.RunClassLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local class works, using pinning and Load
test.RunClassLclFldScenario_Load();
}
// Validates passing an instance member of a class works
test.RunClassFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a class works, using pinning and Load
test.RunClassFldScenario_Load();
}
// Validates passing the field of a local struct works
test.RunStructLclFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing the field of a local struct works, using pinning and Load
test.RunStructLclFldScenario_Load();
}
// Validates passing an instance member of a struct works
test.RunStructFldScenario();
if (AdvSimd.IsSupported)
{
// Validates passing an instance member of a struct works, using pinning and Load
test.RunStructFldScenario_Load();
}
}
else
{
// Validates we throw on unsupported hardware
test.RunUnsupportedScenario();
}
if (!test.Succeeded)
{
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
public sealed unsafe class SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16
{
private struct DataTable
{
private byte[] inArray1;
private byte[] inArray2;
private byte[] inArray3;
private byte[] outArray;
private GCHandle inHandle1;
private GCHandle inHandle2;
private GCHandle inHandle3;
private GCHandle outHandle;
private ulong alignment;
public DataTable(Int32[] inArray1, Int16[] inArray2, Int16[] inArray3, Int32[] outArray, int alignment)
{
int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>();
int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<Int16>();
int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int16>();
int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>();
if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray)
{
throw new ArgumentException("Invalid value of alignment");
}
this.inArray1 = new byte[alignment * 2];
this.inArray2 = new byte[alignment * 2];
this.inArray3 = new byte[alignment * 2];
this.outArray = new byte[alignment * 2];
this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned);
this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned);
this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned);
this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned);
this.alignment = (ulong)alignment;
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<Int16, byte>(ref inArray2[0]), (uint)sizeOfinArray2);
Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int16, byte>(ref inArray3[0]), (uint)sizeOfinArray3);
}
public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment);
public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment);
public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment);
public void Dispose()
{
inHandle1.Free();
inHandle2.Free();
inHandle3.Free();
outHandle.Free();
}
private static unsafe void* Align(byte* buffer, ulong expectedAlignment)
{
return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1));
}
}
private struct TestStruct
{
public Vector128<Int32> _fld1;
public Vector64<Int16> _fld2;
public Vector64<Int16> _fld3;
public static TestStruct Create()
{
var testStruct = new TestStruct();
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
return testStruct;
}
public void RunStructFldScenario(SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16 testClass)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(_fld1, _fld2, _fld3);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
public void RunStructFldScenario_Load(SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16 testClass)
{
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector64<Int16>* pFld2 = &_fld2)
fixed (Vector64<Int16>* pFld3 = &_fld3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector64((Int16*)(pFld2)),
AdvSimd.LoadVector64((Int16*)(pFld3))
);
Unsafe.Write(testClass._dataTable.outArrayPtr, result);
testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr);
}
}
}
private static readonly int LargestVectorSize = 16;
private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16);
private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32);
private static Int32[] _data1 = new Int32[Op1ElementCount];
private static Int16[] _data2 = new Int16[Op2ElementCount];
private static Int16[] _data3 = new Int16[Op3ElementCount];
private static Vector128<Int32> _clsVar1;
private static Vector64<Int16> _clsVar2;
private static Vector64<Int16> _clsVar3;
private Vector128<Int32> _fld1;
private Vector64<Int16> _fld2;
private Vector64<Int16> _fld3;
private DataTable _dataTable;
static SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16()
{
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
}
public SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16()
{
Succeeded = true;
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>());
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld2), ref Unsafe.As<Int16, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>());
for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); }
for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetInt16(); }
for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); }
_dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize);
}
public bool IsSupported => AdvSimd.IsSupported;
public bool Succeeded { get; set; }
public void RunBasicScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr)
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunBasicScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceWideningLowerAndAdd), new Type[] { typeof(Vector128<Int32>), typeof(Vector64<Int16>), typeof(Vector64<Int16>) })
.Invoke(null, new object[] {
Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr),
Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr)
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunReflectionScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load));
var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AbsoluteDifferenceWideningLowerAndAdd), new Type[] { typeof(Vector128<Int32>), typeof(Vector64<Int16>), typeof(Vector64<Int16>) })
.Invoke(null, new object[] {
AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr)),
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr))
});
Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result));
ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr);
}
public void RunClsVarScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
_clsVar1,
_clsVar2,
_clsVar3
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
public void RunClsVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load));
fixed (Vector128<Int32>* pClsVar1 = &_clsVar1)
fixed (Vector64<Int16>* pClsVar2 = &_clsVar2)
fixed (Vector64<Int16>* pClsVar3 = &_clsVar3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pClsVar1)),
AdvSimd.LoadVector64((Int16*)(pClsVar2)),
AdvSimd.LoadVector64((Int16*)(pClsVar3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr);
}
}
public void RunLclVarScenario_UnsafeRead()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead));
var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr);
var op2 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray2Ptr);
var op3 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr);
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunLclVarScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load));
var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr));
var op2 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray2Ptr));
var op3 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(op1, op2, op3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario));
var test = new SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16();
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunClassLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load));
var test = new SimpleTernaryOpTest__AbsoluteDifferenceWideningLowerAndAdd_Vector64_Int16();
fixed (Vector128<Int32>* pFld1 = &test._fld1)
fixed (Vector64<Int16>* pFld2 = &test._fld2)
fixed (Vector64<Int16>* pFld3 = &test._fld3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector64((Int16*)(pFld2)),
AdvSimd.LoadVector64((Int16*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
}
public void RunClassFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario));
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(_fld1, _fld2, _fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
public void RunClassFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load));
fixed (Vector128<Int32>* pFld1 = &_fld1)
fixed (Vector64<Int16>* pFld2 = &_fld2)
fixed (Vector64<Int16>* pFld3 = &_fld3)
{
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(pFld1)),
AdvSimd.LoadVector64((Int16*)(pFld2)),
AdvSimd.LoadVector64((Int16*)(pFld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr);
}
}
public void RunStructLclFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario));
var test = TestStruct.Create();
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(test._fld1, test._fld2, test._fld3);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructLclFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load));
var test = TestStruct.Create();
var result = AdvSimd.AbsoluteDifferenceWideningLowerAndAdd(
AdvSimd.LoadVector128((Int32*)(&test._fld1)),
AdvSimd.LoadVector64((Int16*)(&test._fld2)),
AdvSimd.LoadVector64((Int16*)(&test._fld3))
);
Unsafe.Write(_dataTable.outArrayPtr, result);
ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr);
}
public void RunStructFldScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario));
var test = TestStruct.Create();
test.RunStructFldScenario(this);
}
public void RunStructFldScenario_Load()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load));
var test = TestStruct.Create();
test.RunStructFldScenario_Load(this);
}
public void RunUnsupportedScenario()
{
TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario));
bool succeeded = false;
try
{
RunBasicScenario_UnsafeRead();
}
catch (PlatformNotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
Succeeded = false;
}
}
private void ValidateResult(Vector128<Int32> op1, Vector64<Int16> op2, Vector64<Int16> op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] inArray3 = new Int16[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), op2);
Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), op3);
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "")
{
Int32[] inArray1 = new Int32[Op1ElementCount];
Int16[] inArray2 = new Int16[Op2ElementCount];
Int16[] inArray3 = new Int16[Op3ElementCount];
Int32[] outArray = new Int32[RetElementCount];
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<Int16>>());
Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>());
ValidateResult(inArray1, inArray2, inArray3, outArray, method);
}
private void ValidateResult(Int32[] firstOp, Int16[] secondOp, Int16[] thirdOp, Int32[] result, [CallerMemberName] string method = "")
{
bool succeeded = true;
for (var i = 0; i < RetElementCount; i++)
{
if (Helpers.AbsoluteDifferenceWideningAndAdd(firstOp[i], secondOp[i], thirdOp[i]) != result[i])
{
succeeded = false;
break;
}
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AbsoluteDifferenceWideningLowerAndAdd)}<Int32>(Vector128<Int32>, Vector64<Int16>, Vector64<Int16>): {method} failed:");
TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})");
TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})");
TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})");
TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})");
TestLibrary.TestFramework.LogInformation(string.Empty);
Succeeded = false;
}
}
}
}
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/CodeGenBringUpTests/Ge1.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
using System.Runtime.CompilerServices;
public class BringUpTest_Ge1
{
const int Pass = 100;
const int Fail = -1;
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static bool Ge1(int x)
{
return x >= 1;
}
public static int Main()
{
bool y = Ge1(1);
if (y == true) return Pass;
else return Fail;
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
using System;
using System.Runtime.CompilerServices;
public class BringUpTest_Ge1
{
const int Pass = 100;
const int Fail = -1;
[MethodImplAttribute(MethodImplOptions.NoInlining)]
public static bool Ge1(int x)
{
return x >= 1;
}
public static int Main()
{
bool y = Ge1(1);
if (y == true) return Pass;
else return Fail;
}
}
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Private.CoreLib/src/System/Collections/Generic/Comparer.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using System.Runtime.Serialization;
namespace System.Collections.Generic
{
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
public abstract partial class Comparer<T> : IComparer, IComparer<T>
{
// public static Comparer<T> Default is runtime-specific
public static Comparer<T> Create(Comparison<T> comparison!!)
{
return new ComparisonComparer<T>(comparison);
}
public abstract int Compare(T? x, T? y);
int IComparer.Compare(object? x, object? y)
{
if (x == null) return y == null ? 0 : -1;
if (y == null) return 1;
if (x is T && y is T) return Compare((T)x, (T)y);
ThrowHelper.ThrowArgumentException(ExceptionResource.Argument_InvalidArgumentForComparison);
return 0;
}
}
internal sealed class ComparisonComparer<T> : Comparer<T>
{
private readonly Comparison<T> _comparison;
public ComparisonComparer(Comparison<T> comparison)
{
_comparison = comparison;
}
public override int Compare(T? x, T? y) => _comparison(x!, y!);
}
// Note: although there is a lot of shared code in the following
// comparers, we do not incorporate it into a base class for perf
// reasons. Adding another base class (even one with no fields)
// means another generic instantiation, which can be costly esp.
// for value types.
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
// Needs to be public to support binary serialization compatibility
public sealed partial class GenericComparer<T> : Comparer<T> where T : IComparable<T>
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public override int Compare(T? x, T? y)
{
if (x != null)
{
if (y != null) return x.CompareTo(y);
return 1;
}
if (y != null) return -1;
return 0;
}
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
}
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
// Needs to be public to support binary serialization compatibility
public sealed partial class NullableComparer<T> : Comparer<T?> where T : struct, IComparable<T>
{
public override int Compare(T? x, T? y)
{
if (x.HasValue)
{
if (y.HasValue) return x.value.CompareTo(y.value);
return 1;
}
if (y.HasValue) return -1;
return 0;
}
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
}
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
// Needs to be public to support binary serialization compatibility
public sealed partial class ObjectComparer<T> : Comparer<T>
{
public override int Compare(T? x, T? y)
{
return System.Collections.Comparer.Default.Compare(x, y);
}
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
}
[Serializable]
internal sealed partial class EnumComparer<T> : Comparer<T>, ISerializable where T : struct, Enum
{
public EnumComparer() { }
// Used by the serialization engine.
private EnumComparer(SerializationInfo info, StreamingContext context) { }
// public override int Compare(T x, T y) is runtime-specific
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
public void GetObjectData(SerializationInfo info, StreamingContext context)
{
// Previously Comparer<T> was not specialized for enums,
// and instead fell back to ObjectComparer which uses boxing.
// Set the type as ObjectComparer here so code that serializes
// Comparer for enums will not break.
info.SetType(typeof(ObjectComparer<T>));
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using System.Runtime.Serialization;
namespace System.Collections.Generic
{
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
public abstract partial class Comparer<T> : IComparer, IComparer<T>
{
// public static Comparer<T> Default is runtime-specific
public static Comparer<T> Create(Comparison<T> comparison!!)
{
return new ComparisonComparer<T>(comparison);
}
public abstract int Compare(T? x, T? y);
int IComparer.Compare(object? x, object? y)
{
if (x == null) return y == null ? 0 : -1;
if (y == null) return 1;
if (x is T && y is T) return Compare((T)x, (T)y);
ThrowHelper.ThrowArgumentException(ExceptionResource.Argument_InvalidArgumentForComparison);
return 0;
}
}
internal sealed class ComparisonComparer<T> : Comparer<T>
{
private readonly Comparison<T> _comparison;
public ComparisonComparer(Comparison<T> comparison)
{
_comparison = comparison;
}
public override int Compare(T? x, T? y) => _comparison(x!, y!);
}
// Note: although there is a lot of shared code in the following
// comparers, we do not incorporate it into a base class for perf
// reasons. Adding another base class (even one with no fields)
// means another generic instantiation, which can be costly esp.
// for value types.
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
// Needs to be public to support binary serialization compatibility
public sealed partial class GenericComparer<T> : Comparer<T> where T : IComparable<T>
{
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public override int Compare(T? x, T? y)
{
if (x != null)
{
if (y != null) return x.CompareTo(y);
return 1;
}
if (y != null) return -1;
return 0;
}
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
}
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
// Needs to be public to support binary serialization compatibility
public sealed partial class NullableComparer<T> : Comparer<T?> where T : struct, IComparable<T>
{
public override int Compare(T? x, T? y)
{
if (x.HasValue)
{
if (y.HasValue) return x.value.CompareTo(y.value);
return 1;
}
if (y.HasValue) return -1;
return 0;
}
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
}
[Serializable]
[TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
// Needs to be public to support binary serialization compatibility
public sealed partial class ObjectComparer<T> : Comparer<T>
{
public override int Compare(T? x, T? y)
{
return System.Collections.Comparer.Default.Compare(x, y);
}
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
}
[Serializable]
internal sealed partial class EnumComparer<T> : Comparer<T>, ISerializable where T : struct, Enum
{
public EnumComparer() { }
// Used by the serialization engine.
private EnumComparer(SerializationInfo info, StreamingContext context) { }
// public override int Compare(T x, T y) is runtime-specific
// Equals method for the comparer itself.
public override bool Equals([NotNullWhen(true)] object? obj) =>
obj != null && GetType() == obj.GetType();
public override int GetHashCode() =>
GetType().GetHashCode();
public void GetObjectData(SerializationInfo info, StreamingContext context)
{
// Previously Comparer<T> was not specialized for enums,
// and instead fell back to ObjectComparer which uses boxing.
// Set the type as ObjectComparer here so code that serializes
// Comparer for enums will not break.
info.SetType(typeof(ObjectComparer<T>));
}
}
}
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/Directed/rvastatics/rvastatic3.il
|
.assembly extern mscorlib{}
.assembly extern xunit.core {}
.assembly rvastatic3{}
.class public A{
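// Helper: Call1 multiplies its int64 argument by 2, unsigned-shifts it right by 1, and returns the result as a native int.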
.method static native int Call1(int64) {.maxstack 50
ldarg.0
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
conv.i
ret
}
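// Helper: Call2 converts its float64 argument to an integer and returns it as a native int after floating-point/integer conversion round trips.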
.method static native int Call2(float64) {.maxstack 50
ldarg.0
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
conv.i
ret
}
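// V1: loads each RVA static field directly with ldsfld and throws System.Exception if it does not contain its expected value.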
.method static void V1() {.maxstack 50
ldsfld int64 [rvastatic3]A::a0100
ldc.i8 0
beq a0101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0101:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a0102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0102:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 2.0
beq a0103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0103:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 3
beq a0104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0104:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 4
beq a0105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0105:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 5
beq a0106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0106:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 6
beq a0107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0107:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 7.0
beq a0108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0108:
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 8
beq a0109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0109:
ldsfld int8 [rvastatic3]A::a0109
ldc.i4 9
beq a01010
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01010:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 10
beq a01011
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01011:
ldsfld int64 [rvastatic3]A::a01011
ldc.i8 11
beq a01012
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01012:
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 12
beq a01013
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01013:
ldsfld int8 [rvastatic3]A::a01013
ldc.i4 13
beq a01014
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01014:
ldsfld int16 [rvastatic3]A::a01014
ldc.i4 14
beq a01015
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01015:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 15
beq a01016
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01016:
ldsfld float32 [rvastatic3]A::a01016
ldc.r4 16.0
beq a01017
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01017:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 17.0
beq a01018
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01018:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a01019
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01019:
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 19
beq a01020
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01020:
ldsfld int32 [rvastatic3]A::a01020
ldc.i4 20
beq a01021
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01021:
ldsfld int32 [rvastatic3]A::a01021
ldc.i4 21
beq a01022
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01022:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a01023
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01023:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a01024
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01024:
ldsfld int8 [rvastatic3]A::a01024
ldc.i4 24
beq a01025
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01025:
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 25
beq a01026
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01026:
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 26
beq a01027
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01027:
ldsfld int8 [rvastatic3]A::a01027
ldc.i4 27
beq a01028
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01028:
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 28
beq a01029
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01029:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 29
beq a01030
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01030:
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 30
beq a01031
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01031:
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 31
beq a01032
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01032:
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 32
beq a01033
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01033:
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 33
beq a01034
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01034:
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 34
beq a01035
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01035:
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 35
beq a01036
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01036:
ldsfld int32 [rvastatic3]A::a01036
ldc.i4 36
beq a01037
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01037:
ldsfld int16 [rvastatic3]A::a01037
ldc.i4 37
beq a01038
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01038:
ldsfld float32 [rvastatic3]A::a01038
ldc.r4 38.0
beq a01039
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01039:
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 39
beq a01040
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01040:
ldsfld int8 [rvastatic3]A::a01040
ldc.i4 40
beq a01041
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01041:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 41.0
beq a01042
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01042:
ldsfld int32 [rvastatic3]A::a01042
ldc.i4 42
beq a01043
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01043:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a01044
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01044:
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 44
beq a01045
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01045:
ldsfld int64 [rvastatic3]A::a01045
ldc.i8 45
beq a01046
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01046:
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 46
beq a01047
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01047:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a01048
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01048:
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 48.0
beq a01049
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01049:
ldsfld int64 [rvastatic3]A::a01049
ldc.i8 49
beq a01050
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01050:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 50
beq a01051
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01051:
ldsfld float32 [rvastatic3]A::a01051
ldc.r4 51.0
beq a01052
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01052:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 52
beq a01053
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01053:
ldsfld int64 [rvastatic3]A::a01053
ldc.i8 53
beq a01054
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01054:
ldsfld int8 [rvastatic3]A::a01054
ldc.i4 54
beq a01055
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01055:
ldsfld int8 [rvastatic3]A::a01055
ldc.i4 55
beq a01056
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01056:
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 56.0
beq a01057
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01057:
ldsfld int32 [rvastatic3]A::a01057
ldc.i4 57
beq a01058
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01058:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 58
beq a01059
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01059:
ldsfld int64 [rvastatic3]A::a01059
ldc.i8 59
beq a01060
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01060:
ldsfld int8 [rvastatic3]A::a01060
ldc.i4 60
beq a01061
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01061:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a01062
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01062:
ldsfld int64 [rvastatic3]A::a01062
ldc.i8 62
beq a01063
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01063:
ldsfld float32 [rvastatic3]A::a01063
ldc.r4 63.0
beq a01064
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01064:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 64
beq a01065
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01065:
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 65.0
beq a01066
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01066:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 66
beq a01067
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01067:
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 67
beq a01068
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01068:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 68
beq a01069
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01069:
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 69
beq a01070
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01070:
ldsfld int64 [rvastatic3]A::a01070
ldc.i8 70
beq a01071
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01071:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 71
beq a01072
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01072:
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 72
beq a01073
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01073:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a01074
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01074:
ldsfld float32 [rvastatic3]A::a01074
ldc.r4 74.0
beq a01075
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01075:
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 75
beq a01076
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01076:
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 76
beq a01077
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01077:
ldsfld int8 [rvastatic3]A::a01077
ldc.i4 77
beq a01078
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01078:
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 78
beq a01079
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01079:
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 79
beq a01080
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01080:
ldsfld int16 [rvastatic3]A::a01080
ldc.i4 80
beq a01081
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01081:
ldsfld int32 [rvastatic3]A::a01081
ldc.i4 81
beq a01082
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01082:
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 82
beq a01083
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01083:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 83
beq a01084
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01084:
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 84
beq a01085
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01085:
ldsfld int32 [rvastatic3]A::a01085
ldc.i4 85
beq a01086
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01086:
ldsfld int8 [rvastatic3]A::a01086
ldc.i4 86
beq a01087
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01087:
ldsfld int64 [rvastatic3]A::a01087
ldc.i8 87
beq a01088
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01088:
ldsfld int8 [rvastatic3]A::a01088
ldc.i4 88
beq a01089
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01089:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 89
beq a01090
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01090:
ldsfld int64 [rvastatic3]A::a01090
ldc.i8 90
beq a01091
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01091:
ldsfld int8 [rvastatic3]A::a01091
ldc.i4 91
beq a01092
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01092:
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 92
beq a01093
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01093:
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 93
beq a01094
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01094:
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 94
beq a01095
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01095:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 95.0
beq a01096
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01096:
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 96
beq a01097
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01097:
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 97
beq a01098
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01098:
ldsfld float32 [rvastatic3]A::a01098
ldc.r4 98.0
beq a01099
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01099:
ldsfld int32 [rvastatic3]A::a01099
ldc.i4 99
beq a010100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010100:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 100
beq a010101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010101:
ldsfld int32 [rvastatic3]A::a010101
ldc.i4 101
beq a010102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010102:
ldsfld int8 [rvastatic3]A::a010102
ldc.i4 102
beq a010103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010103:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010104:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 104
beq a010105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010105:
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 105.0
beq a010106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010106:
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 106.0
beq a010107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010107:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 107
beq a010108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010108:
ldsfld float32 [rvastatic3]A::a010108
ldc.r4 108.0
beq a010109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010109:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010110
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010110:
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 110
beq a010111
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010111:
ldsfld int64 [rvastatic3]A::a010111
ldc.i8 111
beq a010112
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010112:
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 112
beq a010113
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010113:
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 113
beq a010114
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010114:
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 114
beq a010115
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010115:
ldsfld int32 [rvastatic3]A::a010115
ldc.i4 115
beq a010116
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010116:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 116
beq a010117
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010117:
ldsfld int16 [rvastatic3]A::a010117
ldc.i4 117
beq a010118
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010118:
ldsfld int64 [rvastatic3]A::a010118
ldc.i8 118
beq a010119
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010119:
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 119
beq a010120
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010120:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 120
beq a010121
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010121:
ldsfld int32 [rvastatic3]A::a010121
ldc.i4 121
beq a010122
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010122:
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 2
beq a010123
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010123:
ldsfld int32 [rvastatic3]A::a010123
ldc.i4 123
beq a010124
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010124:
ldsfld int8 [rvastatic3]A::a010124
ldc.i4 4
beq a010125
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010125:
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 125
beq a010126
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010126:
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 126.0
beq a010127
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010127:
ldsfld int64 [rvastatic3]A::a010127
ldc.i8 127
beq a010128
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010128:
ret}
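// V2: repeats the V1 checks, but takes each field's address with ldsflda and dereferences it with ldind before comparing.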
.method static void V2() {.maxstack 50
ldsflda int64 [rvastatic3]A::a0100
ldind.i8
ldc.i8 0
beq a0100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0100:
ldsflda int64 [rvastatic3]A::a0101
ldind.i8
ldc.i8 1
beq a0101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0101:
ldsflda float32 [rvastatic3]A::a0102
ldind.r4
ldc.r4 2.0
beq a0102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0102:
ldsflda int64 [rvastatic3]A::a0103
ldind.i8
ldc.i8 3
beq a0103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0103:
ldsflda int64 [rvastatic3]A::a0104
ldind.i8
ldc.i8 4
beq a0104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0104:
ldsflda int16 [rvastatic3]A::a0105
ldind.i2
ldc.i4 5
beq a0105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0105:
ldsflda int16 [rvastatic3]A::a0106
ldind.i2
ldc.i4 6
beq a0106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0106:
ldsflda float32 [rvastatic3]A::a0107
ldind.r4
ldc.r4 7.0
beq a0107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0107:
ldsflda int32 [rvastatic3]A::a0108
ldind.i4
ldc.i4 8
beq a0108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0108:
ldsflda int8 [rvastatic3]A::a0109
ldind.i1
ldc.i4 9
beq a0109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0109:
ldsflda int32 [rvastatic3]A::a01010
ldind.i4
ldc.i4 10
beq a01010
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01010:
ldsflda int64 [rvastatic3]A::a01011
ldind.i8
ldc.i8 11
beq a01011
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01011:
ldsflda int32 [rvastatic3]A::a01012
ldind.i4
ldc.i4 12
beq a01012
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01012:
ldsflda int8 [rvastatic3]A::a01013
ldind.i1
ldc.i4 13
beq a01013
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01013:
ldsflda int16 [rvastatic3]A::a01014
ldind.i2
ldc.i4 14
beq a01014
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01014:
ldsflda int16 [rvastatic3]A::a01015
ldind.i2
ldc.i4 15
beq a01015
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01015:
ldsflda float32 [rvastatic3]A::a01016
ldind.r4
ldc.r4 16.0
beq a01016
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01016:
ldsflda float32 [rvastatic3]A::a01017
ldind.r4
ldc.r4 17.0
beq a01017
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01017:
ldsflda int32 [rvastatic3]A::a01018
ldind.i4
ldc.i4 18
beq a01018
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01018:
ldsflda int8 [rvastatic3]A::a01019
ldind.i1
ldc.i4 19
beq a01019
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01019:
ldsflda int32 [rvastatic3]A::a01020
ldind.i4
ldc.i4 20
beq a01020
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01020:
ldsflda int32 [rvastatic3]A::a01021
ldind.i4
ldc.i4 21
beq a01021
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01021:
ldsflda int64 [rvastatic3]A::a01022
ldind.i8
ldc.i8 22
beq a01022
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01022:
ldsflda int32 [rvastatic3]A::a01023
ldind.i4
ldc.i4 23
beq a01023
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01023:
ldsflda int8 [rvastatic3]A::a01024
ldind.i1
ldc.i4 24
beq a01024
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01024:
ldsflda int8 [rvastatic3]A::a01025
ldind.i1
ldc.i4 25
beq a01025
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01025:
ldsflda int16 [rvastatic3]A::a01026
ldind.i2
ldc.i4 26
beq a01026
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01026:
ldsflda int8 [rvastatic3]A::a01027
ldind.i1
ldc.i4 27
beq a01027
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01027:
ldsflda int16 [rvastatic3]A::a01028
ldind.i2
ldc.i4 28
beq a01028
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01028:
ldsflda int64 [rvastatic3]A::a01029
ldind.i8
ldc.i8 29
beq a01029
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01029:
ldsflda int32 [rvastatic3]A::a01030
ldind.i4
ldc.i4 30
beq a01030
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01030:
ldsflda int32 [rvastatic3]A::a01031
ldind.i4
ldc.i4 31
beq a01031
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01031:
ldsflda int32 [rvastatic3]A::a01032
ldind.i4
ldc.i4 32
beq a01032
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01032:
ldsflda int8 [rvastatic3]A::a01033
ldind.i1
ldc.i4 33
beq a01033
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01033:
ldsflda int16 [rvastatic3]A::a01034
ldind.i2
ldc.i4 34
beq a01034
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01034:
ldsflda int32 [rvastatic3]A::a01035
ldind.i4
ldc.i4 35
beq a01035
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01035:
ldsflda int32 [rvastatic3]A::a01036
ldind.i4
ldc.i4 36
beq a01036
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01036:
ldsflda int16 [rvastatic3]A::a01037
ldind.i2
ldc.i4 37
beq a01037
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01037:
ldsflda float32 [rvastatic3]A::a01038
ldind.r4
ldc.r4 38.0
beq a01038
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01038:
ldsflda int8 [rvastatic3]A::a01039
ldind.i1
ldc.i4 39
beq a01039
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01039:
ldsflda int8 [rvastatic3]A::a01040
ldind.i1
ldc.i4 40
beq a01040
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01040:
ldsflda float32 [rvastatic3]A::a01041
ldind.r4
ldc.r4 41.0
beq a01041
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01041:
ldsflda int32 [rvastatic3]A::a01042
ldind.i4
ldc.i4 42
beq a01042
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01042:
ldsflda int32 [rvastatic3]A::a01043
ldind.i4
ldc.i4 43
beq a01043
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01043:
ldsflda int32 [rvastatic3]A::a01044
ldind.i4
ldc.i4 44
beq a01044
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01044:
ldsflda int64 [rvastatic3]A::a01045
ldind.i8
ldc.i8 45
beq a01045
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01045:
ldsflda int64 [rvastatic3]A::a01046
ldind.i8
ldc.i8 46
beq a01046
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01046:
ldsflda int64 [rvastatic3]A::a01047
ldind.i8
ldc.i8 47
beq a01047
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01047:
ldsflda float32 [rvastatic3]A::a01048
ldind.r4
ldc.r4 48.0
beq a01048
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01048:
ldsflda int64 [rvastatic3]A::a01049
ldind.i8
ldc.i8 49
beq a01049
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01049:
ldsflda int32 [rvastatic3]A::a01050
ldind.i4
ldc.i4 50
beq a01050
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01050:
ldsflda float32 [rvastatic3]A::a01051
ldind.r4
ldc.r4 51.0
beq a01051
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01051:
ldsflda int32 [rvastatic3]A::a01052
ldind.i4
ldc.i4 52
beq a01052
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01052:
ldsflda int64 [rvastatic3]A::a01053
ldind.i8
ldc.i8 53
beq a01053
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01053:
ldsflda int8 [rvastatic3]A::a01054
ldind.i1
ldc.i4 54
beq a01054
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01054:
ldsflda int8 [rvastatic3]A::a01055
ldind.i1
ldc.i4 55
beq a01055
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01055:
ldsflda float32 [rvastatic3]A::a01056
ldind.r4
ldc.r4 56.0
beq a01056
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01056:
ldsflda int32 [rvastatic3]A::a01057
ldind.i4
ldc.i4 57
beq a01057
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01057:
ldsflda int64 [rvastatic3]A::a01058
ldind.i8
ldc.i8 58
beq a01058
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01058:
ldsflda int64 [rvastatic3]A::a01059
ldind.i8
ldc.i8 59
beq a01059
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01059:
ldsflda int8 [rvastatic3]A::a01060
ldind.i1
ldc.i4 60
beq a01060
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01060:
ldsflda int16 [rvastatic3]A::a01061
ldind.i2
ldc.i4 61
beq a01061
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01061:
ldsflda int64 [rvastatic3]A::a01062
ldind.i8
ldc.i8 62
beq a01062
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01062:
ldsflda float32 [rvastatic3]A::a01063
ldind.r4
ldc.r4 63.0
beq a01063
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01063:
ldsflda int64 [rvastatic3]A::a01064
ldind.i8
ldc.i8 64
beq a01064
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01064:
ldsflda float32 [rvastatic3]A::a01065
ldind.r4
ldc.r4 65.0
beq a01065
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01065:
ldsflda int8 [rvastatic3]A::a01066
ldind.i1
ldc.i4 66
beq a01066
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01066:
ldsflda int16 [rvastatic3]A::a01067
ldind.i2
ldc.i4 67
beq a01067
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01067:
ldsflda int64 [rvastatic3]A::a01068
ldind.i8
ldc.i8 68
beq a01068
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01068:
ldsflda int64 [rvastatic3]A::a01069
ldind.i8
ldc.i8 69
beq a01069
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01069:
ldsflda int64 [rvastatic3]A::a01070
ldind.i8
ldc.i8 70
beq a01070
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01070:
ldsflda int32 [rvastatic3]A::a01071
ldind.i4
ldc.i4 71
beq a01071
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01071:
ldsflda int8 [rvastatic3]A::a01072
ldind.i1
ldc.i4 72
beq a01072
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01072:
ldsflda int32 [rvastatic3]A::a01073
ldind.i4
ldc.i4 73
beq a01073
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01073:
ldsflda float32 [rvastatic3]A::a01074
ldind.r4
ldc.r4 74.0
beq a01074
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01074:
ldsflda int64 [rvastatic3]A::a01075
ldind.i8
ldc.i8 75
beq a01075
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01075:
ldsflda int8 [rvastatic3]A::a01076
ldind.i1
ldc.i4 76
beq a01076
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01076:
ldsflda int8 [rvastatic3]A::a01077
ldind.i1
ldc.i4 77
beq a01077
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01077:
ldsflda int8 [rvastatic3]A::a01078
ldind.i1
ldc.i4 78
beq a01078
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01078:
ldsflda int32 [rvastatic3]A::a01079
ldind.i4
ldc.i4 79
beq a01079
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01079:
ldsflda int16 [rvastatic3]A::a01080
ldind.i2
ldc.i4 80
beq a01080
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01080:
ldsflda int32 [rvastatic3]A::a01081
ldind.i4
ldc.i4 81
beq a01081
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01081:
ldsflda int8 [rvastatic3]A::a01082
ldind.i1
ldc.i4 82
beq a01082
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01082:
ldsflda int32 [rvastatic3]A::a01083
ldind.i4
ldc.i4 83
beq a01083
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01083:
ldsflda int8 [rvastatic3]A::a01084
ldind.i1
ldc.i4 84
beq a01084
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01084:
ldsflda int32 [rvastatic3]A::a01085
ldind.i4
ldc.i4 85
beq a01085
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01085:
ldsflda int8 [rvastatic3]A::a01086
ldind.i1
ldc.i4 86
beq a01086
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01086:
ldsflda int64 [rvastatic3]A::a01087
ldind.i8
ldc.i8 87
beq a01087
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01087:
ldsflda int8 [rvastatic3]A::a01088
ldind.i1
ldc.i4 88
beq a01088
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01088:
ldsflda int16 [rvastatic3]A::a01089
ldind.i2
ldc.i4 89
beq a01089
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01089:
ldsflda int64 [rvastatic3]A::a01090
ldind.i8
ldc.i8 90
beq a01090
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01090:
ldsflda int8 [rvastatic3]A::a01091
ldind.i1
ldc.i4 91
beq a01091
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01091:
ldsflda int64 [rvastatic3]A::a01092
ldind.i8
ldc.i8 92
beq a01092
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01092:
ldsflda int16 [rvastatic3]A::a01093
ldind.i2
ldc.i4 93
beq a01093
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01093:
ldsflda int8 [rvastatic3]A::a01094
ldind.i1
ldc.i4 94
beq a01094
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01094:
ldsflda float32 [rvastatic3]A::a01095
ldind.r4
ldc.r4 95.0
beq a01095
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01095:
ldsflda int16 [rvastatic3]A::a01096
ldind.i2
ldc.i4 96
beq a01096
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01096:
ldsflda int64 [rvastatic3]A::a01097
ldind.i8
ldc.i8 97
beq a01097
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01097:
ldsflda float32 [rvastatic3]A::a01098
ldind.r4
ldc.r4 98.0
beq a01098
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01098:
ldsflda int32 [rvastatic3]A::a01099
ldind.i4
ldc.i4 99
beq a01099
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01099:
ldsflda int32 [rvastatic3]A::a010100
ldind.i4
ldc.i4 100
beq a010100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010100:
ldsflda int32 [rvastatic3]A::a010101
ldind.i4
ldc.i4 101
beq a010101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010101:
ldsflda int8 [rvastatic3]A::a010102
ldind.i1
ldc.i4 102
beq a010102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010102:
ldsflda int16 [rvastatic3]A::a010103
ldind.i2
ldc.i4 103
beq a010103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010103:
ldsflda int8 [rvastatic3]A::a010104
ldind.i1
ldc.i4 104
beq a010104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010104:
ldsflda float32 [rvastatic3]A::a010105
ldind.r4
ldc.r4 105.0
beq a010105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010105:
ldsflda float32 [rvastatic3]A::a010106
ldind.r4
ldc.r4 106.0
beq a010106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010106:
ldsflda int8 [rvastatic3]A::a010107
ldind.i1
ldc.i4 107
beq a010107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010107:
ldsflda float32 [rvastatic3]A::a010108
ldind.r4
ldc.r4 108.0
beq a010108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010108:
ldsflda int16 [rvastatic3]A::a010109
ldind.i2
ldc.i4 109
beq a010109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010109:
ldsflda int8 [rvastatic3]A::a010110
ldind.i1
ldc.i4 110
beq a010110
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010110:
ldsflda int64 [rvastatic3]A::a010111
ldind.i8
ldc.i8 111
beq a010111
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010111:
ldsflda int16 [rvastatic3]A::a010112
ldind.i2
ldc.i4 112
beq a010112
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010112:
ldsflda int16 [rvastatic3]A::a010113
ldind.i2
ldc.i4 113
beq a010113
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010113:
ldsflda int32 [rvastatic3]A::a010114
ldind.i4
ldc.i4 114
beq a010114
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010114:
ldsflda int32 [rvastatic3]A::a010115
ldind.i4
ldc.i4 115
beq a010115
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010115:
ldsflda int8 [rvastatic3]A::a010116
ldind.i1
ldc.i4 116
beq a010116
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010116:
ldsflda int16 [rvastatic3]A::a010117
ldind.i2
ldc.i4 117
beq a010117
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010117:
ldsflda int64 [rvastatic3]A::a010118
ldind.i8
ldc.i8 118
beq a010118
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010118:
ldsflda int8 [rvastatic3]A::a010119
ldind.i1
ldc.i4 119
beq a010119
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010119:
ldsflda int64 [rvastatic3]A::a010120
ldind.i8
ldc.i8 120
beq a010120
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010120:
ldsflda int32 [rvastatic3]A::a010121
ldind.i4
ldc.i4 121
beq a010121
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010121:
ldsflda int8 [rvastatic3]A::a010122
ldind.i1
ldc.i4 2
beq a010122
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010122:
ldsflda int32 [rvastatic3]A::a010123
ldind.i4
ldc.i4 123
beq a010123
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010123:
ldsflda int8 [rvastatic3]A::a010124
ldind.i1
ldc.i4 4
beq a010124
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010124:
ldsflda int64 [rvastatic3]A::a010125
ldind.i8
ldc.i8 125
beq a010125
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010125:
ldsflda float32 [rvastatic3]A::a010126
ldind.r4
ldc.r4 126.0
beq a010126
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010126:
ldsflda int64 [rvastatic3]A::a010127
ldind.i8
ldc.i8 127
beq a010127
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010127:
ret}
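// V3: re-verifies a subset of the fields via ldsfld, in shuffled order and with repeated checks.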
.method static void V3() {.maxstack 50
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 44
beq a010129
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010129:
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 56.0
beq a010130
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010130:
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 28
beq a010131
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010131:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 66
beq a010132
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010132:
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 125
beq a010133
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010133:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010134
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010134:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 58
beq a010135
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010135:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 107
beq a010136
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010136:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a010137
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010137:
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 30
beq a010138
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010138:
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 67
beq a010139
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010139:
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 12
beq a010140
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010140:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 100
beq a010141
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010141:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 100
beq a010142
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010142:
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 19
beq a010143
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010143:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010144
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010144:
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 93
beq a010145
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010145:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 58
beq a010146
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010146:
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 106.0
beq a010147
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010147:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a010148
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010148:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010149
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010149:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 2.0
beq a010150
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010150:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010151
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010151:
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 105.0
beq a010152
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010152:
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 97
beq a010153
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010153:
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 114
beq a010154
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010154:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010155
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010155:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a010156
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010156:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 2.0
beq a010157
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010157:
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 46
beq a010158
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010158:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 15
beq a010159
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010159:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010160
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010160:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 68
beq a010161
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010161:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 120
beq a010162
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010162:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 6
beq a010163
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010163:
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 79
beq a010164
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010164:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a010165
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010165:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 104
beq a010166
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010166:
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 33
beq a010167
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010167:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 71
beq a010168
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010168:
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 8
beq a010169
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010169:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 41.0
beq a010170
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010170:
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 112
beq a010171
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010171:
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 65.0
beq a010172
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010172:
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 78
beq a010173
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010173:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 89
beq a010174
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010174:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 95.0
beq a010175
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010175:
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 39
beq a010176
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010176:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010177
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010177:
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 48.0
beq a010178
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010178:
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 2
beq a010179
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010179:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a010180
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010180:
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 119
beq a010181
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010181:
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 126.0
beq a010182
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010182:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 17.0
beq a010183
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010183:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 41.0
beq a010184
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010184:
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 75
beq a010185
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010185:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 50
beq a010186
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010186:
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 96
beq a010187
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010187:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 10
beq a010188
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010188:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 66
beq a010189
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010189:
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 113
beq a010190
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010190:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 104
beq a010191
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010191:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a010192
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010192:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 15
beq a010193
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010193:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010194
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010194:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 71
beq a010195
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010195:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 107
beq a010196
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010196:
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 94
beq a010197
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010197:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 68
beq a010198
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010198:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 29
beq a010199
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010199:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 116
beq a010200
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010200:
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 25
beq a010201
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010201:
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 34
beq a010202
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010202:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 52
beq a010203
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010203:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a010204
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010204:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a010205
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010205:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010206
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010206:
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 26
beq a010207
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010207:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 7.0
beq a010208
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010208:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a010209
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010209:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 4
beq a010210
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010210:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 29
beq a010211
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010211:
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 82
beq a010212
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010212:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 52
beq a010213
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010213:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 95.0
beq a010214
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010214:
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 32
beq a010215
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010215:
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 69
beq a010216
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010216:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010217
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010217:
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 92
beq a010218
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010218:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 4
beq a010219
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010219:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010220
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010220:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 7.0
beq a010221
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010221:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010222
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010222:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 5
beq a010223
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010223:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010224
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010224:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 64
beq a010225
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010225:
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 110
beq a010226
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010226:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 50
beq a010227
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010227:
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 35
beq a010228
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010228:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 17.0
beq a010229
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010229:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 116
beq a010230
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010230:
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 72
beq a010231
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010231:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010232
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010232:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a010233
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010233:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 5
beq a010234
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010234:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010235
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010235:
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 84
beq a010236
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010236:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a010237
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010237:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 83
beq a010238
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010238:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010239
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010239:
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 31
beq a010240
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010240:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010241
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010241:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 89
beq a010242
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010242:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 120
beq a010243
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010243:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a010244
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010244:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 83
beq a010245
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010245:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010246
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010246:
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 76
beq a010247
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010247:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a010248
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010248:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 10
beq a010249
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010249:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a010250
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010250:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 3
beq a010251
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010251:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a010252
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010252:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 64
beq a010253
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010253:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 6
beq a010254
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010254:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010255
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010255:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 3
beq a010256
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010256:
ret}
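// V4: takes each field's address with ldsflda, runs it through conversion and arithmetic round trips intended to leave the address unchanged, then dereferences it with ldind and throws (with the field name) on mismatch.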
.method static void V4() {.maxstack 50
ldsflda int64 [rvastatic3]A::a0100
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 0
beq a0100
ldstr "a0100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0100:
ldsflda int64 [rvastatic3]A::a0101
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i8
ldc.i8 1
beq a0101
ldstr "a0101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0101:
ldsflda float32 [rvastatic3]A::a0102
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.r4
ldc.r4 2.0
beq a0102
ldstr "a0102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0102:
ldsflda int64 [rvastatic3]A::a0103
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 3
beq a0103
ldstr "a0103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0103:
ldsflda int64 [rvastatic3]A::a0104
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i8
ldc.i8 4
beq a0104
ldstr "a0104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0104:
ldsflda int16 [rvastatic3]A::a0105
conv.i8
dup
dup
xor
xor
conv.i
ldind.i2
ldc.i4 5
beq a0105
ldstr "a0105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0105:
ldsflda int16 [rvastatic3]A::a0106
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 6
beq a0106
ldstr "a0106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0106:
ldsflda float32 [rvastatic3]A::a0107
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.r4
ldc.r4 7.0
beq a0107
ldstr "a0107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0107:
ldsflda int32 [rvastatic3]A::a0108
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 8
beq a0108
ldstr "a0108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0108:
ldsflda int8 [rvastatic3]A::a0109
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 9
beq a0109
ldstr "a0109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0109:
ldsflda int32 [rvastatic3]A::a01010
conv.i8
ldc.i8 40202
add
conv.i8
ldc.i8 40202
sub
conv.i
ldind.i4
ldc.i4 10
beq a01010
ldstr "a01010"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01010:
ldsflda int64 [rvastatic3]A::a01011
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i8
ldc.i8 11
beq a01011
ldstr "a01011"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01011:
ldsflda int32 [rvastatic3]A::a01012
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 12
beq a01012
ldstr "a01012"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01012:
ldsflda int8 [rvastatic3]A::a01013
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 13
beq a01013
ldstr "a01013"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01013:
ldsflda int16 [rvastatic3]A::a01014
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i2
ldc.i4 14
beq a01014
ldstr "a01014"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01014:
ldsflda int16 [rvastatic3]A::a01015
conv.i8
ldc.i8 37800
add
conv.i8
ldc.i8 37800
sub
conv.i
ldind.i2
ldc.i4 15
beq a01015
ldstr "a01015"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01015:
ldsflda float32 [rvastatic3]A::a01016
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 16.0
beq a01016
ldstr "a01016"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01016:
ldsflda float32 [rvastatic3]A::a01017
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.r4
ldc.r4 17.0
beq a01017
ldstr "a01017"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01017:
ldsflda int32 [rvastatic3]A::a01018
conv.i8
ldc.i8 2058
add
conv.i8
ldc.i8 2058
sub
conv.i
ldind.i4
ldc.i4 18
beq a01018
ldstr "a01018"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01018:
ldsflda int8 [rvastatic3]A::a01019
conv.i8
ldc.i8 8916
add
conv.i8
ldc.i8 8916
sub
conv.i
ldind.i1
ldc.i4 19
beq a01019
ldstr "a01019"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01019:
ldsflda int32 [rvastatic3]A::a01020
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 20
beq a01020
ldstr "a01020"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01020:
ldsflda int32 [rvastatic3]A::a01021
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 21
beq a01021
ldstr "a01021"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01021:
ldsflda int64 [rvastatic3]A::a01022
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 22
beq a01022
ldstr "a01022"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01022:
ldsflda int32 [rvastatic3]A::a01023
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 23
beq a01023
ldstr "a01023"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01023:
ldsflda int8 [rvastatic3]A::a01024
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 24
beq a01024
ldstr "a01024"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01024:
ldsflda int8 [rvastatic3]A::a01025
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 25
beq a01025
ldstr "a01025"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01025:
ldsflda int16 [rvastatic3]A::a01026
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i2
ldc.i4 26
beq a01026
ldstr "a01026"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01026:
ldsflda int8 [rvastatic3]A::a01027
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 27
beq a01027
ldstr "a01027"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01027:
ldsflda int16 [rvastatic3]A::a01028
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i2
ldc.i4 28
beq a01028
ldstr "a01028"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01028:
ldsflda int64 [rvastatic3]A::a01029
conv.i8
ldc.i8 877
add
conv.i8
ldc.i8 877
sub
conv.i
ldind.i8
ldc.i8 29
beq a01029
ldstr "a01029"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01029:
ldsflda int32 [rvastatic3]A::a01030
conv.i8
ldc.i8 47449
add
conv.i8
ldc.i8 47449
sub
conv.i
ldind.i4
ldc.i4 30
beq a01030
ldstr "a01030"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01030:
ldsflda int32 [rvastatic3]A::a01031
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 31
beq a01031
ldstr "a01031"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01031:
ldsflda int32 [rvastatic3]A::a01032
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 32
beq a01032
ldstr "a01032"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01032:
ldsflda int8 [rvastatic3]A::a01033
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 33
beq a01033
ldstr "a01033"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01033:
ldsflda int16 [rvastatic3]A::a01034
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i2
ldc.i4 34
beq a01034
ldstr "a01034"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01034:
ldsflda int32 [rvastatic3]A::a01035
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i4
ldc.i4 35
beq a01035
ldstr "a01035"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01035:
ldsflda int32 [rvastatic3]A::a01036
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 36
beq a01036
ldstr "a01036"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01036:
ldsflda int16 [rvastatic3]A::a01037
conv.i8
ldc.i8 25670
add
conv.i8
ldc.i8 25670
sub
conv.i
ldind.i2
ldc.i4 37
beq a01037
ldstr "a01037"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01037:
ldsflda float32 [rvastatic3]A::a01038
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 38.0
beq a01038
ldstr "a01038"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01038:
ldsflda int8 [rvastatic3]A::a01039
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i1
ldc.i4 39
beq a01039
ldstr "a01039"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01039:
ldsflda int8 [rvastatic3]A::a01040
conv.i8
ldc.i8 63628
add
conv.i8
ldc.i8 63628
sub
conv.i
ldind.i1
ldc.i4 40
beq a01040
ldstr "a01040"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01040:
ldsflda float32 [rvastatic3]A::a01041
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.r4
ldc.r4 41.0
beq a01041
ldstr "a01041"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01041:
ldsflda int32 [rvastatic3]A::a01042
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 42
beq a01042
ldstr "a01042"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01042:
ldsflda int32 [rvastatic3]A::a01043
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 43
beq a01043
ldstr "a01043"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01043:
ldsflda int32 [rvastatic3]A::a01044
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 44
beq a01044
ldstr "a01044"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01044:
ldsflda int64 [rvastatic3]A::a01045
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i8
ldc.i8 45
beq a01045
ldstr "a01045"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01045:
ldsflda int64 [rvastatic3]A::a01046
conv.i8
ldc.i8 53032
add
conv.i8
ldc.i8 53032
sub
conv.i
ldind.i8
ldc.i8 46
beq a01046
ldstr "a01046"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01046:
ldsflda int64 [rvastatic3]A::a01047
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i8
ldc.i8 47
beq a01047
ldstr "a01047"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01047:
ldsflda float32 [rvastatic3]A::a01048
conv.i8
ldc.i8 48333
add
conv.i8
ldc.i8 48333
sub
conv.i
ldind.r4
ldc.r4 48.0
beq a01048
ldstr "a01048"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01048:
ldsflda int64 [rvastatic3]A::a01049
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i8
ldc.i8 49
beq a01049
ldstr "a01049"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01049:
ldsflda int32 [rvastatic3]A::a01050
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 50
beq a01050
ldstr "a01050"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01050:
ldsflda float32 [rvastatic3]A::a01051
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.r4
ldc.r4 51.0
beq a01051
ldstr "a01051"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01051:
ldsflda int32 [rvastatic3]A::a01052
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 52
beq a01052
ldstr "a01052"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01052:
ldsflda int64 [rvastatic3]A::a01053
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i8
ldc.i8 53
beq a01053
ldstr "a01053"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01053:
ldsflda int8 [rvastatic3]A::a01054
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i1
ldc.i4 54
beq a01054
ldstr "a01054"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01054:
ldsflda int8 [rvastatic3]A::a01055
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 55
beq a01055
ldstr "a01055"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01055:
ldsflda float32 [rvastatic3]A::a01056
conv.i8
ldc.i8 4395
add
conv.i8
ldc.i8 4395
sub
conv.i
ldind.r4
ldc.r4 56.0
beq a01056
ldstr "a01056"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01056:
ldsflda int32 [rvastatic3]A::a01057
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i4
ldc.i4 57
beq a01057
ldstr "a01057"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01057:
ldsflda int64 [rvastatic3]A::a01058
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i8
ldc.i8 58
beq a01058
ldstr "a01058"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01058:
ldsflda int64 [rvastatic3]A::a01059
conv.i8
ldc.i8 18075
add
conv.i8
ldc.i8 18075
sub
conv.i
ldind.i8
ldc.i8 59
beq a01059
ldstr "a01059"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01059:
ldsflda int8 [rvastatic3]A::a01060
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 60
beq a01060
ldstr "a01060"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01060:
ldsflda int16 [rvastatic3]A::a01061
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i2
ldc.i4 61
beq a01061
ldstr "a01061"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01061:
ldsflda int64 [rvastatic3]A::a01062
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i8
ldc.i8 62
beq a01062
ldstr "a01062"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01062:
ldsflda float32 [rvastatic3]A::a01063
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.r4
ldc.r4 63.0
beq a01063
ldstr "a01063"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01063:
ldsflda int64 [rvastatic3]A::a01064
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 64
beq a01064
ldstr "a01064"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01064:
ldsflda float32 [rvastatic3]A::a01065
conv.i8
ldc.i8 46752
add
conv.i8
ldc.i8 46752
sub
conv.i
ldind.r4
ldc.r4 65.0
beq a01065
ldstr "a01065"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01065:
ldsflda int8 [rvastatic3]A::a01066
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 66
beq a01066
ldstr "a01066"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01066:
ldsflda int16 [rvastatic3]A::a01067
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 67
beq a01067
ldstr "a01067"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01067:
ldsflda int64 [rvastatic3]A::a01068
conv.i8
dup
dup
xor
xor
conv.i
ldind.i8
ldc.i8 68
beq a01068
ldstr "a01068"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01068:
ldsflda int64 [rvastatic3]A::a01069
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i8
ldc.i8 69
beq a01069
ldstr "a01069"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01069:
ldsflda int64 [rvastatic3]A::a01070
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 70
beq a01070
ldstr "a01070"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01070:
ldsflda int32 [rvastatic3]A::a01071
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i4
ldc.i4 71
beq a01071
ldstr "a01071"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01071:
ldsflda int8 [rvastatic3]A::a01072
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i1
ldc.i4 72
beq a01072
ldstr "a01072"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01072:
ldsflda int32 [rvastatic3]A::a01073
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 73
beq a01073
ldstr "a01073"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01073:
ldsflda float32 [rvastatic3]A::a01074
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.r4
ldc.r4 74.0
beq a01074
ldstr "a01074"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01074:
ldsflda int64 [rvastatic3]A::a01075
conv.i8
dup
dup
xor
xor
conv.i
ldind.i8
ldc.i8 75
beq a01075
ldstr "a01075"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01075:
ldsflda int8 [rvastatic3]A::a01076
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 76
beq a01076
ldstr "a01076"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01076:
ldsflda int8 [rvastatic3]A::a01077
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i1
ldc.i4 77
beq a01077
ldstr "a01077"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01077:
ldsflda int8 [rvastatic3]A::a01078
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 78
beq a01078
ldstr "a01078"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01078:
ldsflda int32 [rvastatic3]A::a01079
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 79
beq a01079
ldstr "a01079"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01079:
ldsflda int16 [rvastatic3]A::a01080
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 80
beq a01080
ldstr "a01080"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01080:
ldsflda int32 [rvastatic3]A::a01081
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i4
ldc.i4 81
beq a01081
ldstr "a01081"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01081:
ldsflda int8 [rvastatic3]A::a01082
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 82
beq a01082
ldstr "a01082"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01082:
ldsflda int32 [rvastatic3]A::a01083
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 83
beq a01083
ldstr "a01083"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01083:
ldsflda int8 [rvastatic3]A::a01084
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i1
ldc.i4 84
beq a01084
ldstr "a01084"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01084:
ldsflda int32 [rvastatic3]A::a01085
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 85
beq a01085
ldstr "a01085"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01085:
ldsflda int8 [rvastatic3]A::a01086
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 86
beq a01086
ldstr "a01086"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01086:
ldsflda int64 [rvastatic3]A::a01087
conv.i8
ldc.i8 13996
add
conv.i8
ldc.i8 13996
sub
conv.i
ldind.i8
ldc.i8 87
beq a01087
ldstr "a01087"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01087:
ldsflda int8 [rvastatic3]A::a01088
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 88
beq a01088
ldstr "a01088"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01088:
ldsflda int16 [rvastatic3]A::a01089
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i2
ldc.i4 89
beq a01089
ldstr "a01089"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01089:
ldsflda int64 [rvastatic3]A::a01090
conv.i8
ldc.i8 38561
add
conv.i8
ldc.i8 38561
sub
conv.i
ldind.i8
ldc.i8 90
beq a01090
ldstr "a01090"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01090:
ldsflda int8 [rvastatic3]A::a01091
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 91
beq a01091
ldstr "a01091"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01091:
ldsflda int64 [rvastatic3]A::a01092
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 92
beq a01092
ldstr "a01092"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01092:
ldsflda int16 [rvastatic3]A::a01093
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i2
ldc.i4 93
beq a01093
ldstr "a01093"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01093:
ldsflda int8 [rvastatic3]A::a01094
conv.i8
ldc.i8 16915
add
conv.i8
ldc.i8 16915
sub
conv.i
ldind.i1
ldc.i4 94
beq a01094
ldstr "a01094"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01094:
ldsflda float32 [rvastatic3]A::a01095
conv.i8
dup
dup
xor
xor
conv.i
ldind.r4
ldc.r4 95.0
beq a01095
ldstr "a01095"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01095:
ldsflda int16 [rvastatic3]A::a01096
conv.i8
dup
dup
xor
xor
conv.i
ldind.i2
ldc.i4 96
beq a01096
ldstr "a01096"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01096:
ldsflda int64 [rvastatic3]A::a01097
conv.i8
ldc.i8 53815
add
conv.i8
ldc.i8 53815
sub
conv.i
ldind.i8
ldc.i8 97
beq a01097
ldstr "a01097"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01097:
ldsflda float32 [rvastatic3]A::a01098
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.r4
ldc.r4 98.0
beq a01098
ldstr "a01098"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01098:
ldsflda int32 [rvastatic3]A::a01099
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 99
beq a01099
ldstr "a01099"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01099:
ldsflda int32 [rvastatic3]A::a010100
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i4
ldc.i4 100
beq a010100
ldstr "a010100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010100:
ldsflda int32 [rvastatic3]A::a010101
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 101
beq a010101
ldstr "a010101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010101:
ldsflda int8 [rvastatic3]A::a010102
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 102
beq a010102
ldstr "a010102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010102:
ldsflda int16 [rvastatic3]A::a010103
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i2
ldc.i4 103
beq a010103
ldstr "a010103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010103:
ldsflda int8 [rvastatic3]A::a010104
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i1
ldc.i4 104
beq a010104
ldstr "a010104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010104:
ldsflda float32 [rvastatic3]A::a010105
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 105.0
beq a010105
ldstr "a010105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010105:
ldsflda float32 [rvastatic3]A::a010106
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 106.0
beq a010106
ldstr "a010106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010106:
ldsflda int8 [rvastatic3]A::a010107
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 107
beq a010107
ldstr "a010107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010107:
ldsflda float32 [rvastatic3]A::a010108
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.r4
ldc.r4 108.0
beq a010108
ldstr "a010108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010108:
ldsflda int16 [rvastatic3]A::a010109
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 109
beq a010109
ldstr "a010109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010109:
ldsflda int8 [rvastatic3]A::a010110
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 110
beq a010110
ldstr "a010110"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010110:
ldsflda int64 [rvastatic3]A::a010111
conv.i8
dup
dup
xor
xor
conv.i
ldind.i8
ldc.i8 111
beq a010111
ldstr "a010111"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010111:
ldsflda int16 [rvastatic3]A::a010112
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i2
ldc.i4 112
beq a010112
ldstr "a010112"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010112:
ldsflda int16 [rvastatic3]A::a010113
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i2
ldc.i4 113
beq a010113
ldstr "a010113"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010113:
ldsflda int32 [rvastatic3]A::a010114
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i4
ldc.i4 114
beq a010114
ldstr "a010114"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010114:
ldsflda int32 [rvastatic3]A::a010115
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 115
beq a010115
ldstr "a010115"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010115:
ldsflda int8 [rvastatic3]A::a010116
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 116
beq a010116
ldstr "a010116"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010116:
ldsflda int16 [rvastatic3]A::a010117
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i2
ldc.i4 117
beq a010117
ldstr "a010117"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010117:
ldsflda int64 [rvastatic3]A::a010118
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i8
ldc.i8 118
beq a010118
ldstr "a010118"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010118:
ldsflda int8 [rvastatic3]A::a010119
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 119
beq a010119
ldstr "a010119"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010119:
ldsflda int64 [rvastatic3]A::a010120
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i8
ldc.i8 120
beq a010120
ldstr "a010120"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010120:
ldsflda int32 [rvastatic3]A::a010121
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 121
beq a010121
ldstr "a010121"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010121:
ldsflda int8 [rvastatic3]A::a010122
conv.i8
ldc.i8 51550
add
conv.i8
ldc.i8 51550
sub
conv.i
ldind.i1
ldc.i4 2
beq a010122
ldstr "a010122"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010122:
ldsflda int32 [rvastatic3]A::a010123
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 123
beq a010123
ldstr "a010123"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010123:
ldsflda int8 [rvastatic3]A::a010124
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 4
beq a010124
ldstr "a010124"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010124:
ldsflda int64 [rvastatic3]A::a010125
conv.i8
ldc.i8 47464
add
conv.i8
ldc.i8 47464
sub
conv.i
ldind.i8
ldc.i8 125
beq a010125
ldstr "a010125"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010125:
ldsflda float32 [rvastatic3]A::a010126
conv.i8
ldc.i8 24077
add
conv.i8
ldc.i8 24077
sub
conv.i
ldind.r4
ldc.r4 126.0
beq a010126
ldstr "a010126"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010126:
ldsflda int64 [rvastatic3]A::a010127
conv.i8
ldc.i8 28583
add
conv.i8
ldc.i8 28583
sub
conv.i
ldind.i8
ldc.i8 127
beq a010127
ldstr "a010127"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010127:
ret}
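// V5: for each RVA static field, load its address with ldsflda, convert it to int64 or float64,
// pass it through the Call1(int64)/Call2(float64) helpers (assumed to round-trip the address as a
// native int), dereference the result, and verify the field still holds its expected value.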
.method static void V5() {.maxstack 50
ldsflda int64 [rvastatic3]A::a0100
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 0
beq a0100
ldstr "a0100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0100:
ldsflda int64 [rvastatic3]A::a0101
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 1
beq a0101
ldstr "a0101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0101:
ldsflda float32 [rvastatic3]A::a0102
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.r4
ldc.r4 2.0
beq a0102
ldstr "a0102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0102:
ldsflda int64 [rvastatic3]A::a0103
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 3
beq a0103
ldstr "a0103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0103:
ldsflda int64 [rvastatic3]A::a0104
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 4
beq a0104
ldstr "a0104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0104:
ldsflda int16 [rvastatic3]A::a0105
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 5
beq a0105
ldstr "a0105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0105:
ldsflda int16 [rvastatic3]A::a0106
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 6
beq a0106
ldstr "a0106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0106:
ldsflda float32 [rvastatic3]A::a0107
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 7.0
beq a0107
ldstr "a0107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0107:
ldsflda int32 [rvastatic3]A::a0108
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 8
beq a0108
ldstr "a0108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0108:
ldsflda int8 [rvastatic3]A::a0109
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 9
beq a0109
ldstr "a0109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0109:
ldsflda int32 [rvastatic3]A::a01010
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 10
beq a01010
ldstr "a01010"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01010:
ldsflda int64 [rvastatic3]A::a01011
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 11
beq a01011
ldstr "a01011"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01011:
ldsflda int32 [rvastatic3]A::a01012
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 12
beq a01012
ldstr "a01012"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01012:
ldsflda int8 [rvastatic3]A::a01013
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 13
beq a01013
ldstr "a01013"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01013:
ldsflda int16 [rvastatic3]A::a01014
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 14
beq a01014
ldstr "a01014"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01014:
ldsflda int16 [rvastatic3]A::a01015
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 15
beq a01015
ldstr "a01015"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01015:
ldsflda float32 [rvastatic3]A::a01016
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 16.0
beq a01016
ldstr "a01016"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01016:
ldsflda float32 [rvastatic3]A::a01017
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.r4
ldc.r4 17.0
beq a01017
ldstr "a01017"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01017:
ldsflda int32 [rvastatic3]A::a01018
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 18
beq a01018
ldstr "a01018"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01018:
ldsflda int8 [rvastatic3]A::a01019
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 19
beq a01019
ldstr "a01019"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01019:
ldsflda int32 [rvastatic3]A::a01020
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 20
beq a01020
ldstr "a01020"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01020:
ldsflda int32 [rvastatic3]A::a01021
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 21
beq a01021
ldstr "a01021"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01021:
ldsflda int64 [rvastatic3]A::a01022
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 22
beq a01022
ldstr "a01022"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01022:
ldsflda int32 [rvastatic3]A::a01023
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 23
beq a01023
ldstr "a01023"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01023:
ldsflda int8 [rvastatic3]A::a01024
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 24
beq a01024
ldstr "a01024"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01024:
ldsflda int8 [rvastatic3]A::a01025
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 25
beq a01025
ldstr "a01025"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01025:
ldsflda int16 [rvastatic3]A::a01026
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 26
beq a01026
ldstr "a01026"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01026:
ldsflda int8 [rvastatic3]A::a01027
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 27
beq a01027
ldstr "a01027"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01027:
ldsflda int16 [rvastatic3]A::a01028
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 28
beq a01028
ldstr "a01028"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01028:
ldsflda int64 [rvastatic3]A::a01029
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 29
beq a01029
ldstr "a01029"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01029:
ldsflda int32 [rvastatic3]A::a01030
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 30
beq a01030
ldstr "a01030"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01030:
ldsflda int32 [rvastatic3]A::a01031
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 31
beq a01031
ldstr "a01031"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01031:
ldsflda int32 [rvastatic3]A::a01032
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 32
beq a01032
ldstr "a01032"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01032:
ldsflda int8 [rvastatic3]A::a01033
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 33
beq a01033
ldstr "a01033"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01033:
ldsflda int16 [rvastatic3]A::a01034
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 34
beq a01034
ldstr "a01034"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01034:
ldsflda int32 [rvastatic3]A::a01035
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 35
beq a01035
ldstr "a01035"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01035:
ldsflda int32 [rvastatic3]A::a01036
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 36
beq a01036
ldstr "a01036"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01036:
ldsflda int16 [rvastatic3]A::a01037
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 37
beq a01037
ldstr "a01037"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01037:
ldsflda float32 [rvastatic3]A::a01038
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 38.0
beq a01038
ldstr "a01038"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01038:
ldsflda int8 [rvastatic3]A::a01039
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 39
beq a01039
ldstr "a01039"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01039:
ldsflda int8 [rvastatic3]A::a01040
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 40
beq a01040
ldstr "a01040"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01040:
ldsflda float32 [rvastatic3]A::a01041
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 41.0
beq a01041
ldstr "a01041"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01041:
ldsflda int32 [rvastatic3]A::a01042
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 42
beq a01042
ldstr "a01042"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01042:
ldsflda int32 [rvastatic3]A::a01043
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 43
beq a01043
ldstr "a01043"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01043:
ldsflda int32 [rvastatic3]A::a01044
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 44
beq a01044
ldstr "a01044"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01044:
ldsflda int64 [rvastatic3]A::a01045
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 45
beq a01045
ldstr "a01045"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01045:
ldsflda int64 [rvastatic3]A::a01046
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 46
beq a01046
ldstr "a01046"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01046:
ldsflda int64 [rvastatic3]A::a01047
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 47
beq a01047
ldstr "a01047"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01047:
ldsflda float32 [rvastatic3]A::a01048
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 48.0
beq a01048
ldstr "a01048"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01048:
ldsflda int64 [rvastatic3]A::a01049
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 49
beq a01049
ldstr "a01049"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01049:
ldsflda int32 [rvastatic3]A::a01050
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 50
beq a01050
ldstr "a01050"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01050:
ldsflda float32 [rvastatic3]A::a01051
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 51.0
beq a01051
ldstr "a01051"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01051:
ldsflda int32 [rvastatic3]A::a01052
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 52
beq a01052
ldstr "a01052"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01052:
ldsflda int64 [rvastatic3]A::a01053
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 53
beq a01053
ldstr "a01053"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01053:
ldsflda int8 [rvastatic3]A::a01054
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 54
beq a01054
ldstr "a01054"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01054:
ldsflda int8 [rvastatic3]A::a01055
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 55
beq a01055
ldstr "a01055"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01055:
ldsflda float32 [rvastatic3]A::a01056
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 56.0
beq a01056
ldstr "a01056"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01056:
ldsflda int32 [rvastatic3]A::a01057
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 57
beq a01057
ldstr "a01057"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01057:
ldsflda int64 [rvastatic3]A::a01058
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 58
beq a01058
ldstr "a01058"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01058:
ldsflda int64 [rvastatic3]A::a01059
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 59
beq a01059
ldstr "a01059"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01059:
ldsflda int8 [rvastatic3]A::a01060
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 60
beq a01060
ldstr "a01060"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01060:
ldsflda int16 [rvastatic3]A::a01061
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 61
beq a01061
ldstr "a01061"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01061:
ldsflda int64 [rvastatic3]A::a01062
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 62
beq a01062
ldstr "a01062"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01062:
ldsflda float32 [rvastatic3]A::a01063
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 63.0
beq a01063
ldstr "a01063"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01063:
ldsflda int64 [rvastatic3]A::a01064
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 64
beq a01064
ldstr "a01064"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01064:
ldsflda float32 [rvastatic3]A::a01065
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 65.0
beq a01065
ldstr "a01065"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01065:
ldsflda int8 [rvastatic3]A::a01066
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 66
beq a01066
ldstr "a01066"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01066:
ldsflda int16 [rvastatic3]A::a01067
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 67
beq a01067
ldstr "a01067"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01067:
ldsflda int64 [rvastatic3]A::a01068
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 68
beq a01068
ldstr "a01068"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01068:
ldsflda int64 [rvastatic3]A::a01069
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 69
beq a01069
ldstr "a01069"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01069:
ldsflda int64 [rvastatic3]A::a01070
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 70
beq a01070
ldstr "a01070"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01070:
ldsflda int32 [rvastatic3]A::a01071
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 71
beq a01071
ldstr "a01071"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01071:
ldsflda int8 [rvastatic3]A::a01072
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 72
beq a01072
ldstr "a01072"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01072:
ldsflda int32 [rvastatic3]A::a01073
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 73
beq a01073
ldstr "a01073"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01073:
ldsflda float32 [rvastatic3]A::a01074
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 74.0
beq a01074
ldstr "a01074"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01074:
ldsflda int64 [rvastatic3]A::a01075
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 75
beq a01075
ldstr "a01075"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01075:
ldsflda int8 [rvastatic3]A::a01076
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 76
beq a01076
ldstr "a01076"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01076:
ldsflda int8 [rvastatic3]A::a01077
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 77
beq a01077
ldstr "a01077"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01077:
ldsflda int8 [rvastatic3]A::a01078
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 78
beq a01078
ldstr "a01078"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01078:
ldsflda int32 [rvastatic3]A::a01079
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 79
beq a01079
ldstr "a01079"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01079:
ldsflda int16 [rvastatic3]A::a01080
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 80
beq a01080
ldstr "a01080"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01080:
ldsflda int32 [rvastatic3]A::a01081
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 81
beq a01081
ldstr "a01081"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01081:
ldsflda int8 [rvastatic3]A::a01082
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 82
beq a01082
ldstr "a01082"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01082:
ldsflda int32 [rvastatic3]A::a01083
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 83
beq a01083
ldstr "a01083"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01083:
ldsflda int8 [rvastatic3]A::a01084
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 84
beq a01084
ldstr "a01084"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01084:
ldsflda int32 [rvastatic3]A::a01085
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 85
beq a01085
ldstr "a01085"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01085:
ldsflda int8 [rvastatic3]A::a01086
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 86
beq a01086
ldstr "a01086"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01086:
ldsflda int64 [rvastatic3]A::a01087
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 87
beq a01087
ldstr "a01087"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01087:
ldsflda int8 [rvastatic3]A::a01088
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 88
beq a01088
ldstr "a01088"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01088:
ldsflda int16 [rvastatic3]A::a01089
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 89
beq a01089
ldstr "a01089"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01089:
ldsflda int64 [rvastatic3]A::a01090
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 90
beq a01090
ldstr "a01090"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01090:
ldsflda int8 [rvastatic3]A::a01091
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 91
beq a01091
ldstr "a01091"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01091:
ldsflda int64 [rvastatic3]A::a01092
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 92
beq a01092
ldstr "a01092"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01092:
ldsflda int16 [rvastatic3]A::a01093
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 93
beq a01093
ldstr "a01093"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01093:
ldsflda int8 [rvastatic3]A::a01094
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 94
beq a01094
ldstr "a01094"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01094:
ldsflda float32 [rvastatic3]A::a01095
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 95.0
beq a01095
ldstr "a01095"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01095:
ldsflda int16 [rvastatic3]A::a01096
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 96
beq a01096
ldstr "a01096"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01096:
ldsflda int64 [rvastatic3]A::a01097
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 97
beq a01097
ldstr "a01097"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01097:
ldsflda float32 [rvastatic3]A::a01098
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 98.0
beq a01098
ldstr "a01098"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01098:
ldsflda int32 [rvastatic3]A::a01099
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 99
beq a01099
ldstr "a01099"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01099:
ldsflda int32 [rvastatic3]A::a010100
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 100
beq a010100
ldstr "a010100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010100:
ldsflda int32 [rvastatic3]A::a010101
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 101
beq a010101
ldstr "a010101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010101:
ldsflda int8 [rvastatic3]A::a010102
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 102
beq a010102
ldstr "a010102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010102:
ldsflda int16 [rvastatic3]A::a010103
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 103
beq a010103
ldstr "a010103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010103:
ldsflda int8 [rvastatic3]A::a010104
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 104
beq a010104
ldstr "a010104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010104:
ldsflda float32 [rvastatic3]A::a010105
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 105.0
beq a010105
ldstr "a010105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010105:
ldsflda float32 [rvastatic3]A::a010106
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 106.0
beq a010106
ldstr "a010106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010106:
ldsflda int8 [rvastatic3]A::a010107
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 107
beq a010107
ldstr "a010107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010107:
ldsflda float32 [rvastatic3]A::a010108
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 108.0
beq a010108
ldstr "a010108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010108:
ldsflda int16 [rvastatic3]A::a010109
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 109
beq a010109
ldstr "a010109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010109:
ldsflda int8 [rvastatic3]A::a010110
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 110
beq a010110
ldstr "a010110"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010110:
ldsflda int64 [rvastatic3]A::a010111
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 111
beq a010111
ldstr "a010111"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010111:
ldsflda int16 [rvastatic3]A::a010112
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 112
beq a010112
ldstr "a010112"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010112:
ldsflda int16 [rvastatic3]A::a010113
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 113
beq a010113
ldstr "a010113"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010113:
ldsflda int32 [rvastatic3]A::a010114
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 114
beq a010114
ldstr "a010114"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010114:
ldsflda int32 [rvastatic3]A::a010115
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 115
beq a010115
ldstr "a010115"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010115:
ldsflda int8 [rvastatic3]A::a010116
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 116
beq a010116
ldstr "a010116"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010116:
ldsflda int16 [rvastatic3]A::a010117
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 117
beq a010117
ldstr "a010117"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010117:
ldsflda int64 [rvastatic3]A::a010118
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 118
beq a010118
ldstr "a010118"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010118:
ldsflda int8 [rvastatic3]A::a010119
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 119
beq a010119
ldstr "a010119"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010119:
ldsflda int64 [rvastatic3]A::a010120
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 120
beq a010120
ldstr "a010120"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010120:
ldsflda int32 [rvastatic3]A::a010121
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 121
beq a010121
ldstr "a010121"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010121:
ldsflda int8 [rvastatic3]A::a010122
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 2
beq a010122
ldstr "a010122"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010122:
ldsflda int32 [rvastatic3]A::a010123
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 123
beq a010123
ldstr "a010123"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010123:
ldsflda int8 [rvastatic3]A::a010124
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 4
beq a010124
ldstr "a010124"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010124:
ldsflda int64 [rvastatic3]A::a010125
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 125
beq a010125
ldstr "a010125"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010125:
ldsflda float32 [rvastatic3]A::a010126
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 126.0
beq a010126
ldstr "a010126"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010126:
ldsflda int64 [rvastatic3]A::a010127
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 127
beq a010127
ldstr "a010127"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010127:
ret}
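// V6: increment each RVA static field by one via ldsfld/add/stsfld, then reload it and verify the
// incremented value.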
.method static void V6() {.maxstack 50
ldsfld int64 [rvastatic3]A::a0100
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0100
ldsfld int64 [rvastatic3]A::a0100
ldc.i8 1
beq a0100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0100:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0101
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 2
beq a0101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0101:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a0102
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 3.0
beq a0102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0102:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0103
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 4
beq a0103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0103:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0104
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 5
beq a0104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0104:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a0105
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 6
beq a0105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0105:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a0106
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 7
beq a0106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0106:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a0107
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 8.0
beq a0107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0107:
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a0108
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 9
beq a0108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0108:
ldsfld int8 [rvastatic3]A::a0109
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a0109
ldsfld int8 [rvastatic3]A::a0109
ldc.i4 10
beq a0109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0109:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01010
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 11
beq a01010
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01010:
ldsfld int64 [rvastatic3]A::a01011
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01011
ldsfld int64 [rvastatic3]A::a01011
ldc.i8 12
beq a01011
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01011:
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01012
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 13
beq a01012
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01012:
ldsfld int8 [rvastatic3]A::a01013
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01013
ldsfld int8 [rvastatic3]A::a01013
ldc.i4 14
beq a01013
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01013:
ldsfld int16 [rvastatic3]A::a01014
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01014
ldsfld int16 [rvastatic3]A::a01014
ldc.i4 15
beq a01014
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01014:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01015
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 16
beq a01015
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01015:
ldsfld float32 [rvastatic3]A::a01016
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01016
ldsfld float32 [rvastatic3]A::a01016
ldc.r4 17.0
beq a01016
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01016:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01017
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 18.0
beq a01017
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01017:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01018
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 19
beq a01018
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01018:
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01019
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 20
beq a01019
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01019:
ldsfld int32 [rvastatic3]A::a01020
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01020
ldsfld int32 [rvastatic3]A::a01020
ldc.i4 21
beq a01020
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01020:
ldsfld int32 [rvastatic3]A::a01021
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01021
ldsfld int32 [rvastatic3]A::a01021
ldc.i4 22
beq a01021
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01021:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01022
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 23
beq a01022
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01022:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01023
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 24
beq a01023
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01023:
ldsfld int8 [rvastatic3]A::a01024
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01024
ldsfld int8 [rvastatic3]A::a01024
ldc.i4 25
beq a01024
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01024:
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01025
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 26
beq a01025
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01025:
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01026
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 27
beq a01026
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01026:
ldsfld int8 [rvastatic3]A::a01027
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01027
ldsfld int8 [rvastatic3]A::a01027
ldc.i4 28
beq a01027
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01027:
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01028
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 29
beq a01028
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01028:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01029
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 30
beq a01029
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01029:
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01030
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 31
beq a01030
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01030:
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01031
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 32
beq a01031
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01031:
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01032
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 33
beq a01032
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01032:
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01033
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 34
beq a01033
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01033:
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01034
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 35
beq a01034
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01034:
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01035
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 36
beq a01035
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01035:
ldsfld int32 [rvastatic3]A::a01036
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01036
ldsfld int32 [rvastatic3]A::a01036
ldc.i4 37
beq a01036
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01036:
ldsfld int16 [rvastatic3]A::a01037
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01037
ldsfld int16 [rvastatic3]A::a01037
ldc.i4 38
beq a01037
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01037:
ldsfld float32 [rvastatic3]A::a01038
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01038
ldsfld float32 [rvastatic3]A::a01038
ldc.r4 39.0
beq a01038
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01038:
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01039
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 40
beq a01039
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01039:
ldsfld int8 [rvastatic3]A::a01040
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01040
ldsfld int8 [rvastatic3]A::a01040
ldc.i4 41
beq a01040
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01040:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01041
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 42.0
beq a01041
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01041:
ldsfld int32 [rvastatic3]A::a01042
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01042
ldsfld int32 [rvastatic3]A::a01042
ldc.i4 43
beq a01042
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01042:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01043
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 44
beq a01043
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01043:
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01044
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 45
beq a01044
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01044:
ldsfld int64 [rvastatic3]A::a01045
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01045
ldsfld int64 [rvastatic3]A::a01045
ldc.i8 46
beq a01045
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01045:
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01046
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 47
beq a01046
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01046:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01047
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 48
beq a01047
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01047:
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01048
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 49.0
beq a01048
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01048:
ldsfld int64 [rvastatic3]A::a01049
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01049
ldsfld int64 [rvastatic3]A::a01049
ldc.i8 50
beq a01049
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01049:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01050
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 51
beq a01050
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01050:
ldsfld float32 [rvastatic3]A::a01051
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01051
ldsfld float32 [rvastatic3]A::a01051
ldc.r4 52.0
beq a01051
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01051:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01052
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 53
beq a01052
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01052:
ldsfld int64 [rvastatic3]A::a01053
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01053
ldsfld int64 [rvastatic3]A::a01053
ldc.i8 54
beq a01053
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01053:
ldsfld int8 [rvastatic3]A::a01054
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01054
ldsfld int8 [rvastatic3]A::a01054
ldc.i4 55
beq a01054
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01054:
ldsfld int8 [rvastatic3]A::a01055
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01055
ldsfld int8 [rvastatic3]A::a01055
ldc.i4 56
beq a01055
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01055:
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01056
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 57.0
beq a01056
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01056:
ldsfld int32 [rvastatic3]A::a01057
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01057
ldsfld int32 [rvastatic3]A::a01057
ldc.i4 58
beq a01057
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01057:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01058
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 59
beq a01058
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01058:
ldsfld int64 [rvastatic3]A::a01059
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01059
ldsfld int64 [rvastatic3]A::a01059
ldc.i8 60
beq a01059
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01059:
ldsfld int8 [rvastatic3]A::a01060
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01060
ldsfld int8 [rvastatic3]A::a01060
ldc.i4 61
beq a01060
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01060:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01061
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 62
beq a01061
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01061:
ldsfld int64 [rvastatic3]A::a01062
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01062
ldsfld int64 [rvastatic3]A::a01062
ldc.i8 63
beq a01062
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01062:
ldsfld float32 [rvastatic3]A::a01063
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01063
ldsfld float32 [rvastatic3]A::a01063
ldc.r4 64.0
beq a01063
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01063:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01064
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 65
beq a01064
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01064:
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01065
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 66.0
beq a01065
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01065:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01066
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 67
beq a01066
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01066:
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01067
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 68
beq a01067
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01067:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01068
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 69
beq a01068
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01068:
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01069
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 70
beq a01069
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01069:
ldsfld int64 [rvastatic3]A::a01070
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01070
ldsfld int64 [rvastatic3]A::a01070
ldc.i8 71
beq a01070
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01070:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01071
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 72
beq a01071
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01071:
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01072
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 73
beq a01072
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01072:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01073
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 74
beq a01073
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01073:
ldsfld float32 [rvastatic3]A::a01074
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01074
ldsfld float32 [rvastatic3]A::a01074
ldc.r4 75.0
beq a01074
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01074:
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01075
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 76
beq a01075
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01075:
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01076
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 77
beq a01076
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01076:
ldsfld int8 [rvastatic3]A::a01077
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01077
ldsfld int8 [rvastatic3]A::a01077
ldc.i4 78
beq a01077
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01077:
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01078
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 79
beq a01078
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01078:
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01079
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 80
beq a01079
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01079:
ldsfld int16 [rvastatic3]A::a01080
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01080
ldsfld int16 [rvastatic3]A::a01080
ldc.i4 81
beq a01080
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01080:
ldsfld int32 [rvastatic3]A::a01081
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01081
ldsfld int32 [rvastatic3]A::a01081
ldc.i4 82
beq a01081
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01081:
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01082
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 83
beq a01082
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01082:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01083
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 84
beq a01083
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01083:
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01084
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 85
beq a01084
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01084:
ldsfld int32 [rvastatic3]A::a01085
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01085
ldsfld int32 [rvastatic3]A::a01085
ldc.i4 86
beq a01085
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01085:
ldsfld int8 [rvastatic3]A::a01086
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01086
ldsfld int8 [rvastatic3]A::a01086
ldc.i4 87
beq a01086
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01086:
ldsfld int64 [rvastatic3]A::a01087
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01087
ldsfld int64 [rvastatic3]A::a01087
ldc.i8 88
beq a01087
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01087:
ldsfld int8 [rvastatic3]A::a01088
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01088
ldsfld int8 [rvastatic3]A::a01088
ldc.i4 89
beq a01088
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01088:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01089
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 90
beq a01089
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01089:
ldsfld int64 [rvastatic3]A::a01090
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01090
ldsfld int64 [rvastatic3]A::a01090
ldc.i8 91
beq a01090
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01090:
ldsfld int8 [rvastatic3]A::a01091
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01091
ldsfld int8 [rvastatic3]A::a01091
ldc.i4 92
beq a01091
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01091:
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01092
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 93
beq a01092
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01092:
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01093
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 94
beq a01093
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01093:
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01094
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 95
beq a01094
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01094:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01095
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 96.0
beq a01095
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01095:
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01096
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 97
beq a01096
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01096:
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01097
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 98
beq a01097
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01097:
ldsfld float32 [rvastatic3]A::a01098
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01098
ldsfld float32 [rvastatic3]A::a01098
ldc.r4 99.0
beq a01098
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01098:
ldsfld int32 [rvastatic3]A::a01099
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01099
ldsfld int32 [rvastatic3]A::a01099
ldc.i4 100
beq a01099
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01099:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010100
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 101
beq a010100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010100:
ldsfld int32 [rvastatic3]A::a010101
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010101
ldsfld int32 [rvastatic3]A::a010101
ldc.i4 102
beq a010101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010101:
ldsfld int8 [rvastatic3]A::a010102
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010102
ldsfld int8 [rvastatic3]A::a010102
ldc.i4 103
beq a010102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010102:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010103
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 104
beq a010103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010103:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010104
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 105
beq a010104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010104:
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010105
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 106.0
beq a010105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010105:
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010106
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 107.0
beq a010106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010106:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010107
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 108
beq a010107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010107:
ldsfld float32 [rvastatic3]A::a010108
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010108
ldsfld float32 [rvastatic3]A::a010108
ldc.r4 109.0
beq a010108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010108:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010109
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 110
beq a010109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010109:
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010110
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 111
beq a010110
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010110:
ldsfld int64 [rvastatic3]A::a010111
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010111
ldsfld int64 [rvastatic3]A::a010111
ldc.i8 112
beq a010111
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010111:
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010112
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 113
beq a010112
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010112:
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010113
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 114
beq a010113
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010113:
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010114
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 115
beq a010114
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010114:
ldsfld int32 [rvastatic3]A::a010115
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010115
ldsfld int32 [rvastatic3]A::a010115
ldc.i4 116
beq a010115
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010115:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010116
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 117
beq a010116
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010116:
ldsfld int16 [rvastatic3]A::a010117
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010117
ldsfld int16 [rvastatic3]A::a010117
ldc.i4 118
beq a010117
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010117:
ldsfld int64 [rvastatic3]A::a010118
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010118
ldsfld int64 [rvastatic3]A::a010118
ldc.i8 119
beq a010118
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010118:
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010119
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 120
beq a010119
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010119:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010120
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 121
beq a010120
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010120:
ldsfld int32 [rvastatic3]A::a010121
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010121
ldsfld int32 [rvastatic3]A::a010121
ldc.i4 122
beq a010121
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010121:
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010122
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 3
beq a010122
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010122:
ldsfld int32 [rvastatic3]A::a010123
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010123
ldsfld int32 [rvastatic3]A::a010123
ldc.i4 124
beq a010123
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010123:
ldsfld int8 [rvastatic3]A::a010124
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010124
ldsfld int8 [rvastatic3]A::a010124
ldc.i4 5
beq a010124
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010124:
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010125
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 126
beq a010125
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010125:
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010126
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 127.0
beq a010126
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010126:
ldsfld int64 [rvastatic3]A::a010127
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010127
ldsfld int64 [rvastatic3]A::a010127
ldc.i8 128
beq a010127
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010127:
ret}
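// Test entry point: runs the verification methods V1-V6; returns 100 to signal success.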
.method static int32 Main(string[] args){.entrypoint .maxstack 5
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
call void [rvastatic3]A::V1()
call void [rvastatic3]A::V2()
call void [rvastatic3]A::V3()
call void [rvastatic3]A::V4()
call void [rvastatic3]A::V5()
call void [rvastatic3]A::V6()
ldc.i4 100
ret}
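// RVA static field declarations: each public field is bound (via 'at') to a .data label defined below.
// The private ALIGN fields map to zero-initialized entries that pad the data layout.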
.field public static int64 a0100 at b0100
.field public static int64 a0101 at b0101
.field public static float32 a0102 at b0102
.field private static int32 aALIGN10102 at bALIGN10102
.field public static int64 a0103 at b0103
.field public static int64 a0104 at b0104
.field public static int16 a0105 at b0105
.field private static int16 aALIGN10105 at bALIGN10105
.field private static int32 aALIGN20105 at bALIGN20105
.field public static int16 a0106 at b0106
.field private static int16 aALIGN10106 at bALIGN10106
.field private static int32 aALIGN20106 at bALIGN20106
.field public static float32 a0107 at b0107
.field private static int32 aALIGN10107 at bALIGN10107
.field public static int32 a0108 at b0108
.field private static int32 aALIGN10108 at bALIGN10108
.field public static int8 a0109 at b0109
.field private static int32 aALIGN10109 at bALIGN10109
.field private static int16 aALIGN20109 at bALIGN20109
.field private static int8 aALIGN20109 at bALIGN30109
.field public static int32 a01010 at b01010
.field private static int32 aALIGN101010 at bALIGN101010
.field public static int64 a01011 at b01011
.field public static int32 a01012 at b01012
.field private static int32 aALIGN101012 at bALIGN101012
.field public static int8 a01013 at b01013
.field private static int32 aALIGN101013 at bALIGN101013
.field private static int16 aALIGN201013 at bALIGN201013
.field private static int8 aALIGN201013 at bALIGN301013
.field public static int16 a01014 at b01014
.field private static int16 aALIGN101014 at bALIGN101014
.field private static int32 aALIGN201014 at bALIGN201014
.field public static int16 a01015 at b01015
.field private static int16 aALIGN101015 at bALIGN101015
.field private static int32 aALIGN201015 at bALIGN201015
.field public static float32 a01016 at b01016
.field private static int32 aALIGN101016 at bALIGN101016
.field public static float32 a01017 at b01017
.field private static int32 aALIGN101017 at bALIGN101017
.field public static int32 a01018 at b01018
.field private static int32 aALIGN101018 at bALIGN101018
.field public static int8 a01019 at b01019
.field private static int32 aALIGN101019 at bALIGN101019
.field private static int16 aALIGN201019 at bALIGN201019
.field private static int8 aALIGN201019 at bALIGN301019
.field public static int32 a01020 at b01020
.field private static int32 aALIGN101020 at bALIGN101020
.field public static int32 a01021 at b01021
.field private static int32 aALIGN101021 at bALIGN101021
.field public static int64 a01022 at b01022
.field public static int32 a01023 at b01023
.field private static int32 aALIGN101023 at bALIGN101023
.field public static int8 a01024 at b01024
.field private static int32 aALIGN101024 at bALIGN101024
.field private static int16 aALIGN201024 at bALIGN201024
.field private static int8 aALIGN201024 at bALIGN301024
.field public static int8 a01025 at b01025
.field private static int32 aALIGN101025 at bALIGN101025
.field private static int16 aALIGN201025 at bALIGN201025
.field private static int8 aALIGN201025 at bALIGN301025
.field public static int16 a01026 at b01026
.field private static int16 aALIGN101026 at bALIGN101026
.field private static int32 aALIGN201026 at bALIGN201026
.field public static int8 a01027 at b01027
.field private static int32 aALIGN101027 at bALIGN101027
.field private static int16 aALIGN201027 at bALIGN201027
.field private static int8 aALIGN201027 at bALIGN301027
.field public static int16 a01028 at b01028
.field private static int16 aALIGN101028 at bALIGN101028
.field private static int32 aALIGN201028 at bALIGN201028
.field public static int64 a01029 at b01029
.field public static int32 a01030 at b01030
.field private static int32 aALIGN101030 at bALIGN101030
.field public static int32 a01031 at b01031
.field private static int32 aALIGN101031 at bALIGN101031
.field public static int32 a01032 at b01032
.field private static int32 aALIGN101032 at bALIGN101032
.field public static int8 a01033 at b01033
.field private static int32 aALIGN101033 at bALIGN101033
.field private static int16 aALIGN201033 at bALIGN201033
.field private static int8 aALIGN201033 at bALIGN301033
.field public static int16 a01034 at b01034
.field private static int16 aALIGN101034 at bALIGN101034
.field private static int32 aALIGN201034 at bALIGN201034
.field public static int32 a01035 at b01035
.field private static int32 aALIGN101035 at bALIGN101035
.field public static int32 a01036 at b01036
.field private static int32 aALIGN101036 at bALIGN101036
.field public static int16 a01037 at b01037
.field private static int16 aALIGN101037 at bALIGN101037
.field private static int32 aALIGN201037 at bALIGN201037
.field public static float32 a01038 at b01038
.field private static int32 aALIGN101038 at bALIGN101038
.field public static int8 a01039 at b01039
.field private static int32 aALIGN101039 at bALIGN101039
.field private static int16 aALIGN201039 at bALIGN201039
.field private static int8 aALIGN201039 at bALIGN301039
.field public static int8 a01040 at b01040
.field private static int32 aALIGN101040 at bALIGN101040
.field private static int16 aALIGN201040 at bALIGN201040
.field private static int8 aALIGN201040 at bALIGN301040
.field public static float32 a01041 at b01041
.field private static int32 aALIGN101041 at bALIGN101041
.field public static int32 a01042 at b01042
.field private static int32 aALIGN101042 at bALIGN101042
.field public static int32 a01043 at b01043
.field private static int32 aALIGN101043 at bALIGN101043
.field public static int32 a01044 at b01044
.field private static int32 aALIGN101044 at bALIGN101044
.field public static int64 a01045 at b01045
.field public static int64 a01046 at b01046
.field public static int64 a01047 at b01047
.field public static float32 a01048 at b01048
.field private static int32 aALIGN101048 at bALIGN101048
.field public static int64 a01049 at b01049
.field public static int32 a01050 at b01050
.field private static int32 aALIGN101050 at bALIGN101050
.field public static float32 a01051 at b01051
.field private static int32 aALIGN101051 at bALIGN101051
.field public static int32 a01052 at b01052
.field private static int32 aALIGN101052 at bALIGN101052
.field public static int64 a01053 at b01053
.field public static int8 a01054 at b01054
.field private static int32 aALIGN101054 at bALIGN101054
.field private static int16 aALIGN201054 at bALIGN201054
.field private static int8 aALIGN201054 at bALIGN301054
.field public static int8 a01055 at b01055
.field private static int32 aALIGN101055 at bALIGN101055
.field private static int16 aALIGN201055 at bALIGN201055
.field private static int8 aALIGN201055 at bALIGN301055
.field public static float32 a01056 at b01056
.field private static int32 aALIGN101056 at bALIGN101056
.field public static int32 a01057 at b01057
.field private static int32 aALIGN101057 at bALIGN101057
.field public static int64 a01058 at b01058
.field public static int64 a01059 at b01059
.field public static int8 a01060 at b01060
.field private static int32 aALIGN101060 at bALIGN101060
.field private static int16 aALIGN201060 at bALIGN201060
.field private static int8 aALIGN201060 at bALIGN301060
.field public static int16 a01061 at b01061
.field private static int16 aALIGN101061 at bALIGN101061
.field private static int32 aALIGN201061 at bALIGN201061
.field public static int64 a01062 at b01062
.field public static float32 a01063 at b01063
.field private static int32 aALIGN101063 at bALIGN101063
.field public static int64 a01064 at b01064
.field public static float32 a01065 at b01065
.field private static int32 aALIGN101065 at bALIGN101065
.field public static int8 a01066 at b01066
.field private static int32 aALIGN101066 at bALIGN101066
.field private static int16 aALIGN201066 at bALIGN201066
.field private static int8 aALIGN201066 at bALIGN301066
.field public static int16 a01067 at b01067
.field private static int16 aALIGN101067 at bALIGN101067
.field private static int32 aALIGN201067 at bALIGN201067
.field public static int64 a01068 at b01068
.field public static int64 a01069 at b01069
.field public static int64 a01070 at b01070
.field public static int32 a01071 at b01071
.field private static int32 aALIGN101071 at bALIGN101071
.field public static int8 a01072 at b01072
.field private static int32 aALIGN101072 at bALIGN101072
.field private static int16 aALIGN201072 at bALIGN201072
.field private static int8 aALIGN201072 at bALIGN301072
.field public static int32 a01073 at b01073
.field private static int32 aALIGN101073 at bALIGN101073
.field public static float32 a01074 at b01074
.field private static int32 aALIGN101074 at bALIGN101074
.field public static int64 a01075 at b01075
.field public static int8 a01076 at b01076
.field private static int32 aALIGN101076 at bALIGN101076
.field private static int16 aALIGN201076 at bALIGN201076
.field private static int8 aALIGN201076 at bALIGN301076
.field public static int8 a01077 at b01077
.field private static int32 aALIGN101077 at bALIGN101077
.field private static int16 aALIGN201077 at bALIGN201077
.field private static int8 aALIGN201077 at bALIGN301077
.field public static int8 a01078 at b01078
.field private static int32 aALIGN101078 at bALIGN101078
.field private static int16 aALIGN201078 at bALIGN201078
.field private static int8 aALIGN201078 at bALIGN301078
.field public static int32 a01079 at b01079
.field private static int32 aALIGN101079 at bALIGN101079
.field public static int16 a01080 at b01080
.field private static int16 aALIGN101080 at bALIGN101080
.field private static int32 aALIGN201080 at bALIGN201080
.field public static int32 a01081 at b01081
.field private static int32 aALIGN101081 at bALIGN101081
.field public static int8 a01082 at b01082
.field private static int32 aALIGN101082 at bALIGN101082
.field private static int16 aALIGN201082 at bALIGN201082
.field private static int8 aALIGN201082 at bALIGN301082
.field public static int32 a01083 at b01083
.field private static int32 aALIGN101083 at bALIGN101083
.field public static int8 a01084 at b01084
.field private static int32 aALIGN101084 at bALIGN101084
.field private static int16 aALIGN201084 at bALIGN201084
.field private static int8 aALIGN201084 at bALIGN301084
.field public static int32 a01085 at b01085
.field private static int32 aALIGN101085 at bALIGN101085
.field public static int8 a01086 at b01086
.field private static int32 aALIGN101086 at bALIGN101086
.field private static int16 aALIGN201086 at bALIGN201086
.field private static int8 aALIGN201086 at bALIGN301086
.field public static int64 a01087 at b01087
.field public static int8 a01088 at b01088
.field private static int32 aALIGN101088 at bALIGN101088
.field private static int16 aALIGN201088 at bALIGN201088
.field private static int8 aALIGN201088 at bALIGN301088
.field public static int16 a01089 at b01089
.field private static int16 aALIGN101089 at bALIGN101089
.field private static int32 aALIGN201089 at bALIGN201089
.field public static int64 a01090 at b01090
.field public static int8 a01091 at b01091
.field private static int32 aALIGN101091 at bALIGN101091
.field private static int16 aALIGN201091 at bALIGN201091
.field private static int8 aALIGN201091 at bALIGN301091
.field public static int64 a01092 at b01092
.field public static int16 a01093 at b01093
.field private static int16 aALIGN101093 at bALIGN101093
.field private static int32 aALIGN201093 at bALIGN201093
.field public static int8 a01094 at b01094
.field private static int32 aALIGN101094 at bALIGN101094
.field private static int16 aALIGN201094 at bALIGN201094
.field private static int8 aALIGN201094 at bALIGN301094
.field public static float32 a01095 at b01095
.field private static int32 aALIGN101095 at bALIGN101095
.field public static int16 a01096 at b01096
.field private static int16 aALIGN101096 at bALIGN101096
.field private static int32 aALIGN201096 at bALIGN201096
.field public static int64 a01097 at b01097
.field public static float32 a01098 at b01098
.field private static int32 aALIGN101098 at bALIGN101098
.field public static int32 a01099 at b01099
.field private static int32 aALIGN101099 at bALIGN101099
.field public static int32 a010100 at b010100
.field private static int32 aALIGN1010100 at bALIGN1010100
.field public static int32 a010101 at b010101
.field private static int32 aALIGN1010101 at bALIGN1010101
.field public static int8 a010102 at b010102
.field private static int32 aALIGN1010102 at bALIGN1010102
.field private static int16 aALIGN2010102 at bALIGN2010102
.field private static int8 aALIGN2010102 at bALIGN3010102
.field public static int16 a010103 at b010103
.field private static int16 aALIGN1010103 at bALIGN1010103
.field private static int32 aALIGN2010103 at bALIGN2010103
.field public static int8 a010104 at b010104
.field private static int32 aALIGN1010104 at bALIGN1010104
.field private static int16 aALIGN2010104 at bALIGN2010104
.field private static int8 aALIGN2010104 at bALIGN3010104
.field public static float32 a010105 at b010105
.field private static int32 aALIGN1010105 at bALIGN1010105
.field public static float32 a010106 at b010106
.field private static int32 aALIGN1010106 at bALIGN1010106
.field public static int8 a010107 at b010107
.field private static int32 aALIGN1010107 at bALIGN1010107
.field private static int16 aALIGN2010107 at bALIGN2010107
.field private static int8 aALIGN2010107 at bALIGN3010107
.field public static float32 a010108 at b010108
.field private static int32 aALIGN1010108 at bALIGN1010108
.field public static int16 a010109 at b010109
.field private static int16 aALIGN1010109 at bALIGN1010109
.field private static int32 aALIGN2010109 at bALIGN2010109
.field public static int8 a010110 at b010110
.field private static int32 aALIGN1010110 at bALIGN1010110
.field private static int16 aALIGN2010110 at bALIGN2010110
.field private static int8 aALIGN2010110 at bALIGN3010110
.field public static int64 a010111 at b010111
.field public static int16 a010112 at b010112
.field private static int16 aALIGN1010112 at bALIGN1010112
.field private static int32 aALIGN2010112 at bALIGN2010112
.field public static int16 a010113 at b010113
.field private static int16 aALIGN1010113 at bALIGN1010113
.field private static int32 aALIGN2010113 at bALIGN2010113
.field public static int32 a010114 at b010114
.field private static int32 aALIGN1010114 at bALIGN1010114
.field public static int32 a010115 at b010115
.field private static int32 aALIGN1010115 at bALIGN1010115
.field public static int8 a010116 at b010116
.field private static int32 aALIGN1010116 at bALIGN1010116
.field private static int16 aALIGN2010116 at bALIGN2010116
.field private static int8 aALIGN2010116 at bALIGN3010116
.field public static int16 a010117 at b010117
.field private static int16 aALIGN1010117 at bALIGN1010117
.field private static int32 aALIGN2010117 at bALIGN2010117
.field public static int64 a010118 at b010118
.field public static int8 a010119 at b010119
.field private static int32 aALIGN1010119 at bALIGN1010119
.field private static int16 aALIGN2010119 at bALIGN2010119
.field private static int8 aALIGN2010119 at bALIGN3010119
.field public static int64 a010120 at b010120
.field public static int32 a010121 at b010121
.field private static int32 aALIGN1010121 at bALIGN1010121
.field public static int8 a010122 at b010122
.field private static int32 aALIGN1010122 at bALIGN1010122
.field private static int16 aALIGN2010122 at bALIGN2010122
.field private static int8 aALIGN2010122 at bALIGN3010122
.field public static int32 a010123 at b010123
.field private static int32 aALIGN1010123 at bALIGN1010123
.field public static int8 a010124 at b010124
.field private static int32 aALIGN1010124 at bALIGN1010124
.field private static int16 aALIGN2010124 at bALIGN2010124
.field private static int8 aALIGN2010124 at bALIGN3010124
.field public static int64 a010125 at b010125
.field public static float32 a010126 at b010126
.field private static int32 aALIGN1010126 at bALIGN1010126
.field public static int64 a010127 at b010127
}
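// Backing .data blocks for the RVA static fields declared above.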
.data b0100 = int64(0)
.data b0101 = int64(1)
.data b0102 = float32(2.0)
.data bALIGN10102 = int32(0)
.data b0103 = int64(3)
.data b0104 = int64(4)
.data b0105 = int16(5)
.data bALIGN10105 = int16(0)
.data bALIGN20105 = int32(0)
.data b0106 = int16(6)
.data bALIGN10106 = int16(0)
.data bALIGN20106 = int32(0)
.data b0107 = float32(7.0)
.data bALIGN10107 = int32(0)
.data b0108 = int32(8)
.data bALIGN10108 = int32(0)
.data b0109 = int8(9)
.data bALIGN10109 = int32(0)
.data bALIGN20109 = int16(0)
.data bALIGN30109 = int8(0)
.data b01010 = int32(10)
.data bALIGN101010 = int32(0)
.data b01011 = int64(11)
.data b01012 = int32(12)
.data bALIGN101012 = int32(0)
.data b01013 = int8(13)
.data bALIGN101013 = int32(0)
.data bALIGN201013 = int16(0)
.data bALIGN301013 = int8(0)
.data b01014 = int16(14)
.data bALIGN101014 = int16(0)
.data bALIGN201014 = int32(0)
.data b01015 = int16(15)
.data bALIGN101015 = int16(0)
.data bALIGN201015 = int32(0)
.data b01016 = float32(16.0)
.data bALIGN101016 = int32(0)
.data b01017 = float32(17.0)
.data bALIGN101017 = int32(0)
.data b01018 = int32(18)
.data bALIGN101018 = int32(0)
.data b01019 = int8(19)
.data bALIGN101019 = int32(0)
.data bALIGN201019 = int16(0)
.data bALIGN301019 = int8(0)
.data b01020 = int32(20)
.data bALIGN101020 = int32(0)
.data b01021 = int32(21)
.data bALIGN101021 = int32(0)
.data b01022 = int64(22)
.data b01023 = int32(23)
.data bALIGN101023 = int32(0)
.data b01024 = int8(24)
.data bALIGN101024 = int32(0)
.data bALIGN201024 = int16(0)
.data bALIGN301024 = int8(0)
.data b01025 = int8(25)
.data bALIGN101025 = int32(0)
.data bALIGN201025 = int16(0)
.data bALIGN301025 = int8(0)
.data b01026 = int16(26)
.data bALIGN101026 = int16(0)
.data bALIGN201026 = int32(0)
.data b01027 = int8(27)
.data bALIGN101027 = int32(0)
.data bALIGN201027 = int16(0)
.data bALIGN301027 = int8(0)
.data b01028 = int16(28)
.data bALIGN101028 = int16(0)
.data bALIGN201028 = int32(0)
.data b01029 = int64(29)
.data b01030 = int32(30)
.data bALIGN101030 = int32(0)
.data b01031 = int32(31)
.data bALIGN101031 = int32(0)
.data b01032 = int32(32)
.data bALIGN101032 = int32(0)
.data b01033 = int8(33)
.data bALIGN101033 = int32(0)
.data bALIGN201033 = int16(0)
.data bALIGN301033 = int8(0)
.data b01034 = int16(34)
.data bALIGN101034 = int16(0)
.data bALIGN201034 = int32(0)
.data b01035 = int32(35)
.data bALIGN101035 = int32(0)
.data b01036 = int32(36)
.data bALIGN101036 = int32(0)
.data b01037 = int16(37)
.data bALIGN101037 = int16(0)
.data bALIGN201037 = int32(0)
.data b01038 = float32(38.0)
.data bALIGN101038 = int32(0)
.data b01039 = int8(39)
.data bALIGN101039 = int32(0)
.data bALIGN201039 = int16(0)
.data bALIGN301039 = int8(0)
.data b01040 = int8(40)
.data bALIGN101040 = int32(0)
.data bALIGN201040 = int16(0)
.data bALIGN301040 = int8(0)
.data b01041 = float32(41.0)
.data bALIGN101041 = int32(0)
.data b01042 = int32(42)
.data bALIGN101042 = int32(0)
.data b01043 = int32(43)
.data bALIGN101043 = int32(0)
.data b01044 = int32(44)
.data bALIGN101044 = int32(0)
.data b01045 = int64(45)
.data b01046 = int64(46)
.data b01047 = int64(47)
.data b01048 = float32(48.0)
.data bALIGN101048 = int32(0)
.data b01049 = int64(49)
.data b01050 = int32(50)
.data bALIGN101050 = int32(0)
.data b01051 = float32(51.0)
.data bALIGN101051 = int32(0)
.data b01052 = int32(52)
.data bALIGN101052 = int32(0)
.data b01053 = int64(53)
.data b01054 = int8(54)
.data bALIGN101054 = int32(0)
.data bALIGN201054 = int16(0)
.data bALIGN301054 = int8(0)
.data b01055 = int8(55)
.data bALIGN101055 = int32(0)
.data bALIGN201055 = int16(0)
.data bALIGN301055 = int8(0)
.data b01056 = float32(56.0)
.data bALIGN101056 = int32(0)
.data b01057 = int32(57)
.data bALIGN101057 = int32(0)
.data b01058 = int64(58)
.data b01059 = int64(59)
.data b01060 = int8(60)
.data bALIGN101060 = int32(0)
.data bALIGN201060 = int16(0)
.data bALIGN301060 = int8(0)
.data b01061 = int16(61)
.data bALIGN101061 = int16(0)
.data bALIGN201061 = int32(0)
.data b01062 = int64(62)
.data b01063 = float32(63.0)
.data bALIGN101063 = int32(0)
.data b01064 = int64(64)
.data b01065 = float32(65.0)
.data bALIGN101065 = int32(0)
.data b01066 = int8(66)
.data bALIGN101066 = int32(0)
.data bALIGN201066 = int16(0)
.data bALIGN301066 = int8(0)
.data b01067 = int16(67)
.data bALIGN101067 = int16(0)
.data bALIGN201067 = int32(0)
.data b01068 = int64(68)
.data b01069 = int64(69)
.data b01070 = int64(70)
.data b01071 = int32(71)
.data bALIGN101071 = int32(0)
.data b01072 = int8(72)
.data bALIGN101072 = int32(0)
.data bALIGN201072 = int16(0)
.data bALIGN301072 = int8(0)
.data b01073 = int32(73)
.data bALIGN101073 = int32(0)
.data b01074 = float32(74.0)
.data bALIGN101074 = int32(0)
.data b01075 = int64(75)
.data b01076 = int8(76)
.data bALIGN101076 = int32(0)
.data bALIGN201076 = int16(0)
.data bALIGN301076 = int8(0)
.data b01077 = int8(77)
.data bALIGN101077 = int32(0)
.data bALIGN201077 = int16(0)
.data bALIGN301077 = int8(0)
.data b01078 = int8(78)
.data bALIGN101078 = int32(0)
.data bALIGN201078 = int16(0)
.data bALIGN301078 = int8(0)
.data b01079 = int32(79)
.data bALIGN101079 = int32(0)
.data b01080 = int16(80)
.data bALIGN101080 = int16(0)
.data bALIGN201080 = int32(0)
.data b01081 = int32(81)
.data bALIGN101081 = int32(0)
.data b01082 = int8(82)
.data bALIGN101082 = int32(0)
.data bALIGN201082 = int16(0)
.data bALIGN301082 = int8(0)
.data b01083 = int32(83)
.data bALIGN101083 = int32(0)
.data b01084 = int8(84)
.data bALIGN101084 = int32(0)
.data bALIGN201084 = int16(0)
.data bALIGN301084 = int8(0)
.data b01085 = int32(85)
.data bALIGN101085 = int32(0)
.data b01086 = int8(86)
.data bALIGN101086 = int32(0)
.data bALIGN201086 = int16(0)
.data bALIGN301086 = int8(0)
.data b01087 = int64(87)
.data b01088 = int8(88)
.data bALIGN101088 = int32(0)
.data bALIGN201088 = int16(0)
.data bALIGN301088 = int8(0)
.data b01089 = int16(89)
.data bALIGN101089 = int16(0)
.data bALIGN201089 = int32(0)
.data b01090 = int64(90)
.data b01091 = int8(91)
.data bALIGN101091 = int32(0)
.data bALIGN201091 = int16(0)
.data bALIGN301091 = int8(0)
.data b01092 = int64(92)
.data b01093 = int16(93)
.data bALIGN101093 = int16(0)
.data bALIGN201093 = int32(0)
.data b01094 = int8(94)
.data bALIGN101094 = int32(0)
.data bALIGN201094 = int16(0)
.data bALIGN301094 = int8(0)
.data b01095 = float32(95.0)
.data bALIGN101095 = int32(0)
.data b01096 = int16(96)
.data bALIGN101096 = int16(0)
.data bALIGN201096 = int32(0)
.data b01097 = int64(97)
.data b01098 = float32(98.0)
.data bALIGN101098 = int32(0)
.data b01099 = int32(99)
.data bALIGN101099 = int32(0)
.data b010100 = int32(100)
.data bALIGN1010100 = int32(0)
.data b010101 = int32(101)
.data bALIGN1010101 = int32(0)
.data b010102 = int8(102)
.data bALIGN1010102 = int32(0)
.data bALIGN2010102 = int16(0)
.data bALIGN3010102 = int8(0)
.data b010103 = int16(103)
.data bALIGN1010103 = int16(0)
.data bALIGN2010103 = int32(0)
.data b010104 = int8(104)
.data bALIGN1010104 = int32(0)
.data bALIGN2010104 = int16(0)
.data bALIGN3010104 = int8(0)
.data b010105 = float32(105.0)
.data bALIGN1010105 = int32(0)
.data b010106 = float32(106.0)
.data bALIGN1010106 = int32(0)
.data b010107 = int8(107)
.data bALIGN1010107 = int32(0)
.data bALIGN2010107 = int16(0)
.data bALIGN3010107 = int8(0)
.data b010108 = float32(108.0)
.data bALIGN1010108 = int32(0)
.data b010109 = int16(109)
.data bALIGN1010109 = int16(0)
.data bALIGN2010109 = int32(0)
.data b010110 = int8(110)
.data bALIGN1010110 = int32(0)
.data bALIGN2010110 = int16(0)
.data bALIGN3010110 = int8(0)
.data b010111 = int64(111)
.data b010112 = int16(112)
.data bALIGN1010112 = int16(0)
.data bALIGN2010112 = int32(0)
.data b010113 = int16(113)
.data bALIGN1010113 = int16(0)
.data bALIGN2010113 = int32(0)
.data b010114 = int32(114)
.data bALIGN1010114 = int32(0)
.data b010115 = int32(115)
.data bALIGN1010115 = int32(0)
.data b010116 = int8(116)
.data bALIGN1010116 = int32(0)
.data bALIGN2010116 = int16(0)
.data bALIGN3010116 = int8(0)
.data b010117 = int16(117)
.data bALIGN1010117 = int16(0)
.data bALIGN2010117 = int32(0)
.data b010118 = int64(118)
.data b010119 = int8(119)
.data bALIGN1010119 = int32(0)
.data bALIGN2010119 = int16(0)
.data bALIGN3010119 = int8(0)
.data b010120 = int64(120)
.data b010121 = int32(121)
.data bALIGN1010121 = int32(0)
.data b010122 = int8(2)
.data bALIGN1010122 = int32(0)
.data bALIGN2010122 = int16(0)
.data bALIGN3010122 = int8(0)
.data b010123 = int32(123)
.data bALIGN1010123 = int32(0)
.data b010124 = int8(4)
.data bALIGN1010124 = int32(0)
.data bALIGN2010124 = int16(0)
.data bALIGN3010124 = int8(0)
.data b010125 = int64(125)
.data b010126 = float32(126.0)
.data bALIGN1010126 = int32(0)
.data b010127 = int64(127)
.assembly extern mscorlib{}
.assembly extern xunit.core {}
.assembly rvastatic3{}
.class public A{
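// Conversion helpers: Call1 doubles its int64 argument, shifts it right (unsigned) by one, and returns it as a native int;
// Call2 round-trips its float64 argument through a chain of integer/floating-point conversions.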
.method static native int Call1(int64) {.maxstack 50
ldarg.0
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
conv.i
ret
}
.method static native int Call2(float64) {.maxstack 50
ldarg.0
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
conv.i
ret
}
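// V1: verifies the initial value of each RVA static field against its .data initializer; throws System.Exception on mismatch.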
.method static void V1() {.maxstack 50
ldsfld int64 [rvastatic3]A::a0100
ldc.i8 0
beq a0101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0101:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a0102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0102:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 2.0
beq a0103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0103:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 3
beq a0104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0104:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 4
beq a0105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0105:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 5
beq a0106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0106:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 6
beq a0107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0107:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 7.0
beq a0108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0108:
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 8
beq a0109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0109:
ldsfld int8 [rvastatic3]A::a0109
ldc.i4 9
beq a01010
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01010:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 10
beq a01011
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01011:
ldsfld int64 [rvastatic3]A::a01011
ldc.i8 11
beq a01012
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01012:
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 12
beq a01013
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01013:
ldsfld int8 [rvastatic3]A::a01013
ldc.i4 13
beq a01014
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01014:
ldsfld int16 [rvastatic3]A::a01014
ldc.i4 14
beq a01015
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01015:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 15
beq a01016
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01016:
ldsfld float32 [rvastatic3]A::a01016
ldc.r4 16.0
beq a01017
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01017:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 17.0
beq a01018
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01018:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a01019
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01019:
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 19
beq a01020
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01020:
ldsfld int32 [rvastatic3]A::a01020
ldc.i4 20
beq a01021
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01021:
ldsfld int32 [rvastatic3]A::a01021
ldc.i4 21
beq a01022
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01022:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a01023
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01023:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a01024
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01024:
ldsfld int8 [rvastatic3]A::a01024
ldc.i4 24
beq a01025
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01025:
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 25
beq a01026
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01026:
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 26
beq a01027
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01027:
ldsfld int8 [rvastatic3]A::a01027
ldc.i4 27
beq a01028
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01028:
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 28
beq a01029
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01029:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 29
beq a01030
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01030:
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 30
beq a01031
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01031:
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 31
beq a01032
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01032:
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 32
beq a01033
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01033:
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 33
beq a01034
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01034:
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 34
beq a01035
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01035:
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 35
beq a01036
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01036:
ldsfld int32 [rvastatic3]A::a01036
ldc.i4 36
beq a01037
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01037:
ldsfld int16 [rvastatic3]A::a01037
ldc.i4 37
beq a01038
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01038:
ldsfld float32 [rvastatic3]A::a01038
ldc.r4 38.0
beq a01039
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01039:
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 39
beq a01040
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01040:
ldsfld int8 [rvastatic3]A::a01040
ldc.i4 40
beq a01041
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01041:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 41.0
beq a01042
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01042:
ldsfld int32 [rvastatic3]A::a01042
ldc.i4 42
beq a01043
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01043:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a01044
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01044:
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 44
beq a01045
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01045:
ldsfld int64 [rvastatic3]A::a01045
ldc.i8 45
beq a01046
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01046:
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 46
beq a01047
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01047:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a01048
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01048:
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 48.0
beq a01049
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01049:
ldsfld int64 [rvastatic3]A::a01049
ldc.i8 49
beq a01050
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01050:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 50
beq a01051
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01051:
ldsfld float32 [rvastatic3]A::a01051
ldc.r4 51.0
beq a01052
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01052:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 52
beq a01053
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01053:
ldsfld int64 [rvastatic3]A::a01053
ldc.i8 53
beq a01054
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01054:
ldsfld int8 [rvastatic3]A::a01054
ldc.i4 54
beq a01055
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01055:
ldsfld int8 [rvastatic3]A::a01055
ldc.i4 55
beq a01056
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01056:
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 56.0
beq a01057
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01057:
ldsfld int32 [rvastatic3]A::a01057
ldc.i4 57
beq a01058
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01058:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 58
beq a01059
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01059:
ldsfld int64 [rvastatic3]A::a01059
ldc.i8 59
beq a01060
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01060:
ldsfld int8 [rvastatic3]A::a01060
ldc.i4 60
beq a01061
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01061:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a01062
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01062:
ldsfld int64 [rvastatic3]A::a01062
ldc.i8 62
beq a01063
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01063:
ldsfld float32 [rvastatic3]A::a01063
ldc.r4 63.0
beq a01064
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01064:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 64
beq a01065
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01065:
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 65.0
beq a01066
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01066:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 66
beq a01067
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01067:
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 67
beq a01068
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01068:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 68
beq a01069
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01069:
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 69
beq a01070
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01070:
ldsfld int64 [rvastatic3]A::a01070
ldc.i8 70
beq a01071
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01071:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 71
beq a01072
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01072:
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 72
beq a01073
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01073:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a01074
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01074:
ldsfld float32 [rvastatic3]A::a01074
ldc.r4 74.0
beq a01075
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01075:
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 75
beq a01076
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01076:
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 76
beq a01077
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01077:
ldsfld int8 [rvastatic3]A::a01077
ldc.i4 77
beq a01078
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01078:
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 78
beq a01079
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01079:
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 79
beq a01080
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01080:
ldsfld int16 [rvastatic3]A::a01080
ldc.i4 80
beq a01081
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01081:
ldsfld int32 [rvastatic3]A::a01081
ldc.i4 81
beq a01082
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01082:
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 82
beq a01083
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01083:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 83
beq a01084
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01084:
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 84
beq a01085
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01085:
ldsfld int32 [rvastatic3]A::a01085
ldc.i4 85
beq a01086
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01086:
ldsfld int8 [rvastatic3]A::a01086
ldc.i4 86
beq a01087
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01087:
ldsfld int64 [rvastatic3]A::a01087
ldc.i8 87
beq a01088
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01088:
ldsfld int8 [rvastatic3]A::a01088
ldc.i4 88
beq a01089
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01089:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 89
beq a01090
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01090:
ldsfld int64 [rvastatic3]A::a01090
ldc.i8 90
beq a01091
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01091:
ldsfld int8 [rvastatic3]A::a01091
ldc.i4 91
beq a01092
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01092:
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 92
beq a01093
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01093:
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 93
beq a01094
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01094:
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 94
beq a01095
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01095:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 95.0
beq a01096
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01096:
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 96
beq a01097
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01097:
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 97
beq a01098
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01098:
ldsfld float32 [rvastatic3]A::a01098
ldc.r4 98.0
beq a01099
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01099:
ldsfld int32 [rvastatic3]A::a01099
ldc.i4 99
beq a010100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010100:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 100
beq a010101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010101:
ldsfld int32 [rvastatic3]A::a010101
ldc.i4 101
beq a010102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010102:
ldsfld int8 [rvastatic3]A::a010102
ldc.i4 102
beq a010103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010103:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010104:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 104
beq a010105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010105:
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 105.0
beq a010106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010106:
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 106.0
beq a010107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010107:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 107
beq a010108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010108:
ldsfld float32 [rvastatic3]A::a010108
ldc.r4 108.0
beq a010109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010109:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010110
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010110:
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 110
beq a010111
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010111:
ldsfld int64 [rvastatic3]A::a010111
ldc.i8 111
beq a010112
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010112:
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 112
beq a010113
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010113:
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 113
beq a010114
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010114:
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 114
beq a010115
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010115:
ldsfld int32 [rvastatic3]A::a010115
ldc.i4 115
beq a010116
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010116:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 116
beq a010117
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010117:
ldsfld int16 [rvastatic3]A::a010117
ldc.i4 117
beq a010118
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010118:
ldsfld int64 [rvastatic3]A::a010118
ldc.i8 118
beq a010119
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010119:
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 119
beq a010120
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010120:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 120
beq a010121
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010121:
ldsfld int32 [rvastatic3]A::a010121
ldc.i4 121
beq a010122
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010122:
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 2
beq a010123
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010123:
ldsfld int32 [rvastatic3]A::a010123
ldc.i4 123
beq a010124
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010124:
ldsfld int8 [rvastatic3]A::a010124
ldc.i4 4
beq a010125
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010125:
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 125
beq a010126
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010126:
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 126.0
beq a010127
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010127:
ldsfld int64 [rvastatic3]A::a010127
ldc.i8 127
beq a010128
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010128:
ret}
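// V2: for each RVA static field, take its address with ldsflda, dereference it with the
// matching ldind.* instruction, and compare against the field's expected initialization
// value; any mismatch constructs and throws a System.Exception.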
.method static void V2() {.maxstack 50
ldsflda int64 [rvastatic3]A::a0100
ldind.i8
ldc.i8 0
beq a0100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0100:
ldsflda int64 [rvastatic3]A::a0101
ldind.i8
ldc.i8 1
beq a0101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0101:
ldsflda float32 [rvastatic3]A::a0102
ldind.r4
ldc.r4 2.0
beq a0102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0102:
ldsflda int64 [rvastatic3]A::a0103
ldind.i8
ldc.i8 3
beq a0103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0103:
ldsflda int64 [rvastatic3]A::a0104
ldind.i8
ldc.i8 4
beq a0104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0104:
ldsflda int16 [rvastatic3]A::a0105
ldind.i2
ldc.i4 5
beq a0105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0105:
ldsflda int16 [rvastatic3]A::a0106
ldind.i2
ldc.i4 6
beq a0106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0106:
ldsflda float32 [rvastatic3]A::a0107
ldind.r4
ldc.r4 7.0
beq a0107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0107:
ldsflda int32 [rvastatic3]A::a0108
ldind.i4
ldc.i4 8
beq a0108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0108:
ldsflda int8 [rvastatic3]A::a0109
ldind.i1
ldc.i4 9
beq a0109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0109:
ldsflda int32 [rvastatic3]A::a01010
ldind.i4
ldc.i4 10
beq a01010
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01010:
ldsflda int64 [rvastatic3]A::a01011
ldind.i8
ldc.i8 11
beq a01011
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01011:
ldsflda int32 [rvastatic3]A::a01012
ldind.i4
ldc.i4 12
beq a01012
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01012:
ldsflda int8 [rvastatic3]A::a01013
ldind.i1
ldc.i4 13
beq a01013
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01013:
ldsflda int16 [rvastatic3]A::a01014
ldind.i2
ldc.i4 14
beq a01014
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01014:
ldsflda int16 [rvastatic3]A::a01015
ldind.i2
ldc.i4 15
beq a01015
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01015:
ldsflda float32 [rvastatic3]A::a01016
ldind.r4
ldc.r4 16.0
beq a01016
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01016:
ldsflda float32 [rvastatic3]A::a01017
ldind.r4
ldc.r4 17.0
beq a01017
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01017:
ldsflda int32 [rvastatic3]A::a01018
ldind.i4
ldc.i4 18
beq a01018
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01018:
ldsflda int8 [rvastatic3]A::a01019
ldind.i1
ldc.i4 19
beq a01019
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01019:
ldsflda int32 [rvastatic3]A::a01020
ldind.i4
ldc.i4 20
beq a01020
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01020:
ldsflda int32 [rvastatic3]A::a01021
ldind.i4
ldc.i4 21
beq a01021
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01021:
ldsflda int64 [rvastatic3]A::a01022
ldind.i8
ldc.i8 22
beq a01022
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01022:
ldsflda int32 [rvastatic3]A::a01023
ldind.i4
ldc.i4 23
beq a01023
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01023:
ldsflda int8 [rvastatic3]A::a01024
ldind.i1
ldc.i4 24
beq a01024
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01024:
ldsflda int8 [rvastatic3]A::a01025
ldind.i1
ldc.i4 25
beq a01025
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01025:
ldsflda int16 [rvastatic3]A::a01026
ldind.i2
ldc.i4 26
beq a01026
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01026:
ldsflda int8 [rvastatic3]A::a01027
ldind.i1
ldc.i4 27
beq a01027
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01027:
ldsflda int16 [rvastatic3]A::a01028
ldind.i2
ldc.i4 28
beq a01028
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01028:
ldsflda int64 [rvastatic3]A::a01029
ldind.i8
ldc.i8 29
beq a01029
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01029:
ldsflda int32 [rvastatic3]A::a01030
ldind.i4
ldc.i4 30
beq a01030
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01030:
ldsflda int32 [rvastatic3]A::a01031
ldind.i4
ldc.i4 31
beq a01031
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01031:
ldsflda int32 [rvastatic3]A::a01032
ldind.i4
ldc.i4 32
beq a01032
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01032:
ldsflda int8 [rvastatic3]A::a01033
ldind.i1
ldc.i4 33
beq a01033
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01033:
ldsflda int16 [rvastatic3]A::a01034
ldind.i2
ldc.i4 34
beq a01034
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01034:
ldsflda int32 [rvastatic3]A::a01035
ldind.i4
ldc.i4 35
beq a01035
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01035:
ldsflda int32 [rvastatic3]A::a01036
ldind.i4
ldc.i4 36
beq a01036
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01036:
ldsflda int16 [rvastatic3]A::a01037
ldind.i2
ldc.i4 37
beq a01037
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01037:
ldsflda float32 [rvastatic3]A::a01038
ldind.r4
ldc.r4 38.0
beq a01038
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01038:
ldsflda int8 [rvastatic3]A::a01039
ldind.i1
ldc.i4 39
beq a01039
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01039:
ldsflda int8 [rvastatic3]A::a01040
ldind.i1
ldc.i4 40
beq a01040
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01040:
ldsflda float32 [rvastatic3]A::a01041
ldind.r4
ldc.r4 41.0
beq a01041
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01041:
ldsflda int32 [rvastatic3]A::a01042
ldind.i4
ldc.i4 42
beq a01042
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01042:
ldsflda int32 [rvastatic3]A::a01043
ldind.i4
ldc.i4 43
beq a01043
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01043:
ldsflda int32 [rvastatic3]A::a01044
ldind.i4
ldc.i4 44
beq a01044
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01044:
ldsflda int64 [rvastatic3]A::a01045
ldind.i8
ldc.i8 45
beq a01045
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01045:
ldsflda int64 [rvastatic3]A::a01046
ldind.i8
ldc.i8 46
beq a01046
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01046:
ldsflda int64 [rvastatic3]A::a01047
ldind.i8
ldc.i8 47
beq a01047
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01047:
ldsflda float32 [rvastatic3]A::a01048
ldind.r4
ldc.r4 48.0
beq a01048
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01048:
ldsflda int64 [rvastatic3]A::a01049
ldind.i8
ldc.i8 49
beq a01049
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01049:
ldsflda int32 [rvastatic3]A::a01050
ldind.i4
ldc.i4 50
beq a01050
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01050:
ldsflda float32 [rvastatic3]A::a01051
ldind.r4
ldc.r4 51.0
beq a01051
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01051:
ldsflda int32 [rvastatic3]A::a01052
ldind.i4
ldc.i4 52
beq a01052
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01052:
ldsflda int64 [rvastatic3]A::a01053
ldind.i8
ldc.i8 53
beq a01053
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01053:
ldsflda int8 [rvastatic3]A::a01054
ldind.i1
ldc.i4 54
beq a01054
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01054:
ldsflda int8 [rvastatic3]A::a01055
ldind.i1
ldc.i4 55
beq a01055
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01055:
ldsflda float32 [rvastatic3]A::a01056
ldind.r4
ldc.r4 56.0
beq a01056
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01056:
ldsflda int32 [rvastatic3]A::a01057
ldind.i4
ldc.i4 57
beq a01057
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01057:
ldsflda int64 [rvastatic3]A::a01058
ldind.i8
ldc.i8 58
beq a01058
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01058:
ldsflda int64 [rvastatic3]A::a01059
ldind.i8
ldc.i8 59
beq a01059
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01059:
ldsflda int8 [rvastatic3]A::a01060
ldind.i1
ldc.i4 60
beq a01060
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01060:
ldsflda int16 [rvastatic3]A::a01061
ldind.i2
ldc.i4 61
beq a01061
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01061:
ldsflda int64 [rvastatic3]A::a01062
ldind.i8
ldc.i8 62
beq a01062
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01062:
ldsflda float32 [rvastatic3]A::a01063
ldind.r4
ldc.r4 63.0
beq a01063
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01063:
ldsflda int64 [rvastatic3]A::a01064
ldind.i8
ldc.i8 64
beq a01064
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01064:
ldsflda float32 [rvastatic3]A::a01065
ldind.r4
ldc.r4 65.0
beq a01065
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01065:
ldsflda int8 [rvastatic3]A::a01066
ldind.i1
ldc.i4 66
beq a01066
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01066:
ldsflda int16 [rvastatic3]A::a01067
ldind.i2
ldc.i4 67
beq a01067
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01067:
ldsflda int64 [rvastatic3]A::a01068
ldind.i8
ldc.i8 68
beq a01068
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01068:
ldsflda int64 [rvastatic3]A::a01069
ldind.i8
ldc.i8 69
beq a01069
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01069:
ldsflda int64 [rvastatic3]A::a01070
ldind.i8
ldc.i8 70
beq a01070
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01070:
ldsflda int32 [rvastatic3]A::a01071
ldind.i4
ldc.i4 71
beq a01071
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01071:
ldsflda int8 [rvastatic3]A::a01072
ldind.i1
ldc.i4 72
beq a01072
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01072:
ldsflda int32 [rvastatic3]A::a01073
ldind.i4
ldc.i4 73
beq a01073
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01073:
ldsflda float32 [rvastatic3]A::a01074
ldind.r4
ldc.r4 74.0
beq a01074
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01074:
ldsflda int64 [rvastatic3]A::a01075
ldind.i8
ldc.i8 75
beq a01075
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01075:
ldsflda int8 [rvastatic3]A::a01076
ldind.i1
ldc.i4 76
beq a01076
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01076:
ldsflda int8 [rvastatic3]A::a01077
ldind.i1
ldc.i4 77
beq a01077
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01077:
ldsflda int8 [rvastatic3]A::a01078
ldind.i1
ldc.i4 78
beq a01078
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01078:
ldsflda int32 [rvastatic3]A::a01079
ldind.i4
ldc.i4 79
beq a01079
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01079:
ldsflda int16 [rvastatic3]A::a01080
ldind.i2
ldc.i4 80
beq a01080
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01080:
ldsflda int32 [rvastatic3]A::a01081
ldind.i4
ldc.i4 81
beq a01081
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01081:
ldsflda int8 [rvastatic3]A::a01082
ldind.i1
ldc.i4 82
beq a01082
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01082:
ldsflda int32 [rvastatic3]A::a01083
ldind.i4
ldc.i4 83
beq a01083
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01083:
ldsflda int8 [rvastatic3]A::a01084
ldind.i1
ldc.i4 84
beq a01084
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01084:
ldsflda int32 [rvastatic3]A::a01085
ldind.i4
ldc.i4 85
beq a01085
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01085:
ldsflda int8 [rvastatic3]A::a01086
ldind.i1
ldc.i4 86
beq a01086
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01086:
ldsflda int64 [rvastatic3]A::a01087
ldind.i8
ldc.i8 87
beq a01087
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01087:
ldsflda int8 [rvastatic3]A::a01088
ldind.i1
ldc.i4 88
beq a01088
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01088:
ldsflda int16 [rvastatic3]A::a01089
ldind.i2
ldc.i4 89
beq a01089
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01089:
ldsflda int64 [rvastatic3]A::a01090
ldind.i8
ldc.i8 90
beq a01090
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01090:
ldsflda int8 [rvastatic3]A::a01091
ldind.i1
ldc.i4 91
beq a01091
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01091:
ldsflda int64 [rvastatic3]A::a01092
ldind.i8
ldc.i8 92
beq a01092
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01092:
ldsflda int16 [rvastatic3]A::a01093
ldind.i2
ldc.i4 93
beq a01093
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01093:
ldsflda int8 [rvastatic3]A::a01094
ldind.i1
ldc.i4 94
beq a01094
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01094:
ldsflda float32 [rvastatic3]A::a01095
ldind.r4
ldc.r4 95.0
beq a01095
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01095:
ldsflda int16 [rvastatic3]A::a01096
ldind.i2
ldc.i4 96
beq a01096
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01096:
ldsflda int64 [rvastatic3]A::a01097
ldind.i8
ldc.i8 97
beq a01097
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01097:
ldsflda float32 [rvastatic3]A::a01098
ldind.r4
ldc.r4 98.0
beq a01098
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01098:
ldsflda int32 [rvastatic3]A::a01099
ldind.i4
ldc.i4 99
beq a01099
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01099:
ldsflda int32 [rvastatic3]A::a010100
ldind.i4
ldc.i4 100
beq a010100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010100:
ldsflda int32 [rvastatic3]A::a010101
ldind.i4
ldc.i4 101
beq a010101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010101:
ldsflda int8 [rvastatic3]A::a010102
ldind.i1
ldc.i4 102
beq a010102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010102:
ldsflda int16 [rvastatic3]A::a010103
ldind.i2
ldc.i4 103
beq a010103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010103:
ldsflda int8 [rvastatic3]A::a010104
ldind.i1
ldc.i4 104
beq a010104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010104:
ldsflda float32 [rvastatic3]A::a010105
ldind.r4
ldc.r4 105.0
beq a010105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010105:
ldsflda float32 [rvastatic3]A::a010106
ldind.r4
ldc.r4 106.0
beq a010106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010106:
ldsflda int8 [rvastatic3]A::a010107
ldind.i1
ldc.i4 107
beq a010107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010107:
ldsflda float32 [rvastatic3]A::a010108
ldind.r4
ldc.r4 108.0
beq a010108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010108:
ldsflda int16 [rvastatic3]A::a010109
ldind.i2
ldc.i4 109
beq a010109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010109:
ldsflda int8 [rvastatic3]A::a010110
ldind.i1
ldc.i4 110
beq a010110
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010110:
ldsflda int64 [rvastatic3]A::a010111
ldind.i8
ldc.i8 111
beq a010111
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010111:
ldsflda int16 [rvastatic3]A::a010112
ldind.i2
ldc.i4 112
beq a010112
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010112:
ldsflda int16 [rvastatic3]A::a010113
ldind.i2
ldc.i4 113
beq a010113
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010113:
ldsflda int32 [rvastatic3]A::a010114
ldind.i4
ldc.i4 114
beq a010114
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010114:
ldsflda int32 [rvastatic3]A::a010115
ldind.i4
ldc.i4 115
beq a010115
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010115:
ldsflda int8 [rvastatic3]A::a010116
ldind.i1
ldc.i4 116
beq a010116
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010116:
ldsflda int16 [rvastatic3]A::a010117
ldind.i2
ldc.i4 117
beq a010117
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010117:
ldsflda int64 [rvastatic3]A::a010118
ldind.i8
ldc.i8 118
beq a010118
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010118:
ldsflda int8 [rvastatic3]A::a010119
ldind.i1
ldc.i4 119
beq a010119
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010119:
ldsflda int64 [rvastatic3]A::a010120
ldind.i8
ldc.i8 120
beq a010120
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010120:
ldsflda int32 [rvastatic3]A::a010121
ldind.i4
ldc.i4 121
beq a010121
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010121:
ldsflda int8 [rvastatic3]A::a010122
ldind.i1
ldc.i4 2
beq a010122
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010122:
ldsflda int32 [rvastatic3]A::a010123
ldind.i4
ldc.i4 123
beq a010123
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010123:
ldsflda int8 [rvastatic3]A::a010124
ldind.i1
ldc.i4 4
beq a010124
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010124:
ldsflda int64 [rvastatic3]A::a010125
ldind.i8
ldc.i8 125
beq a010125
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010125:
ldsflda float32 [rvastatic3]A::a010126
ldind.r4
ldc.r4 126.0
beq a010126
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010126:
ldsflda int64 [rvastatic3]A::a010127
ldind.i8
ldc.i8 127
beq a010127
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010127:
ret}
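// V3: spot-check a selection of the RVA static fields again via direct ldsfld loads,
// throwing a System.Exception on the first value that differs from its expected constant.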
.method static void V3() {.maxstack 50
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 44
beq a010129
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010129:
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 56.0
beq a010130
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010130:
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 28
beq a010131
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010131:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 66
beq a010132
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010132:
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 125
beq a010133
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010133:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010134
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010134:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 58
beq a010135
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010135:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 107
beq a010136
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010136:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a010137
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010137:
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 30
beq a010138
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010138:
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 67
beq a010139
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010139:
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 12
beq a010140
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010140:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 100
beq a010141
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010141:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 100
beq a010142
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010142:
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 19
beq a010143
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010143:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010144
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010144:
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 93
beq a010145
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010145:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 58
beq a010146
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010146:
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 106.0
beq a010147
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010147:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a010148
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010148:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010149
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010149:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 2.0
beq a010150
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010150:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010151
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010151:
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 105.0
beq a010152
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010152:
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 97
beq a010153
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010153:
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 114
beq a010154
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010154:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010155
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010155:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a010156
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010156:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 2.0
beq a010157
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010157:
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 46
beq a010158
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010158:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 15
beq a010159
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010159:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010160
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010160:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 68
beq a010161
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010161:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 120
beq a010162
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010162:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 6
beq a010163
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010163:
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 79
beq a010164
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010164:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a010165
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010165:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 104
beq a010166
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010166:
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 33
beq a010167
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010167:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 71
beq a010168
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010168:
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 8
beq a010169
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010169:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 41.0
beq a010170
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010170:
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 112
beq a010171
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010171:
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 65.0
beq a010172
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010172:
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 78
beq a010173
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010173:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 89
beq a010174
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010174:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 95.0
beq a010175
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010175:
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 39
beq a010176
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010176:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010177
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010177:
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 48.0
beq a010178
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010178:
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 2
beq a010179
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010179:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a010180
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010180:
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 119
beq a010181
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010181:
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 126.0
beq a010182
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010182:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 17.0
beq a010183
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010183:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 41.0
beq a010184
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010184:
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 75
beq a010185
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010185:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 50
beq a010186
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010186:
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 96
beq a010187
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010187:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 10
beq a010188
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010188:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 66
beq a010189
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010189:
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 113
beq a010190
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010190:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 104
beq a010191
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010191:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a010192
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010192:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 15
beq a010193
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010193:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010194
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010194:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 71
beq a010195
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010195:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 107
beq a010196
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010196:
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 94
beq a010197
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010197:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 68
beq a010198
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010198:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 29
beq a010199
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010199:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 116
beq a010200
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010200:
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 25
beq a010201
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010201:
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 34
beq a010202
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010202:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 52
beq a010203
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010203:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a010204
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010204:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a010205
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010205:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 43
beq a010206
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010206:
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 26
beq a010207
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010207:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 7.0
beq a010208
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010208:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a010209
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010209:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 4
beq a010210
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010210:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 29
beq a010211
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010211:
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 82
beq a010212
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010212:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 52
beq a010213
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010213:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 95.0
beq a010214
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010214:
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 32
beq a010215
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010215:
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 69
beq a010216
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010216:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010217
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010217:
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 92
beq a010218
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010218:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 4
beq a010219
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010219:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010220
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010220:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 7.0
beq a010221
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010221:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010222
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010222:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 5
beq a010223
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010223:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010224
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010224:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 64
beq a010225
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010225:
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 110
beq a010226
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010226:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 50
beq a010227
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010227:
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 35
beq a010228
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010228:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 17.0
beq a010229
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010229:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 116
beq a010230
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010230:
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 72
beq a010231
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010231:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 103
beq a010232
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010232:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 18
beq a010233
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010233:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 5
beq a010234
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010234:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010235
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010235:
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 84
beq a010236
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010236:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 73
beq a010237
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010237:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 83
beq a010238
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010238:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010239
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010239:
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 31
beq a010240
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010240:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 23
beq a010241
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010241:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 89
beq a010242
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010242:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 120
beq a010243
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010243:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 61
beq a010244
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010244:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 83
beq a010245
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010245:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 22
beq a010246
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010246:
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 76
beq a010247
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010247:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 47
beq a010248
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010248:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 10
beq a010249
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010249:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a010250
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010250:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 3
beq a010251
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010251:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
beq a010252
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010252:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 64
beq a010253
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010253:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 6
beq a010254
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010254:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 109
beq a010255
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010255:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 3
beq a010256
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010256:
ret}
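// V4: take each field address with ldsflda, run it through address arithmetic that is
// intended to be identity-preserving (conversion round-trips, add/sub of a constant,
// shift/or half recombination, xor cancellation, mul 2 then shr.un 1), then dereference
// and compare; mismatches throw a System.Exception carrying the field name.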
.method static void V4() {.maxstack 50
ldsflda int64 [rvastatic3]A::a0100
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 0
beq a0100
ldstr "a0100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0100:
ldsflda int64 [rvastatic3]A::a0101
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i8
ldc.i8 1
beq a0101
ldstr "a0101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0101:
ldsflda float32 [rvastatic3]A::a0102
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.r4
ldc.r4 2.0
beq a0102
ldstr "a0102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0102:
ldsflda int64 [rvastatic3]A::a0103
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 3
beq a0103
ldstr "a0103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0103:
ldsflda int64 [rvastatic3]A::a0104
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i8
ldc.i8 4
beq a0104
ldstr "a0104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0104:
ldsflda int16 [rvastatic3]A::a0105
conv.i8
dup
dup
xor
xor
conv.i
ldind.i2
ldc.i4 5
beq a0105
ldstr "a0105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0105:
ldsflda int16 [rvastatic3]A::a0106
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 6
beq a0106
ldstr "a0106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0106:
ldsflda float32 [rvastatic3]A::a0107
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.r4
ldc.r4 7.0
beq a0107
ldstr "a0107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0107:
ldsflda int32 [rvastatic3]A::a0108
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 8
beq a0108
ldstr "a0108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0108:
ldsflda int8 [rvastatic3]A::a0109
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 9
beq a0109
ldstr "a0109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0109:
ldsflda int32 [rvastatic3]A::a01010
conv.i8
ldc.i8 40202
add
conv.i8
ldc.i8 40202
sub
conv.i
ldind.i4
ldc.i4 10
beq a01010
ldstr "a01010"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01010:
ldsflda int64 [rvastatic3]A::a01011
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i8
ldc.i8 11
beq a01011
ldstr "a01011"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01011:
ldsflda int32 [rvastatic3]A::a01012
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 12
beq a01012
ldstr "a01012"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01012:
ldsflda int8 [rvastatic3]A::a01013
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 13
beq a01013
ldstr "a01013"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01013:
ldsflda int16 [rvastatic3]A::a01014
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i2
ldc.i4 14
beq a01014
ldstr "a01014"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01014:
ldsflda int16 [rvastatic3]A::a01015
conv.i8
ldc.i8 37800
add
conv.i8
ldc.i8 37800
sub
conv.i
ldind.i2
ldc.i4 15
beq a01015
ldstr "a01015"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01015:
ldsflda float32 [rvastatic3]A::a01016
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 16.0
beq a01016
ldstr "a01016"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01016:
ldsflda float32 [rvastatic3]A::a01017
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.r4
ldc.r4 17.0
beq a01017
ldstr "a01017"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01017:
ldsflda int32 [rvastatic3]A::a01018
conv.i8
ldc.i8 2058
add
conv.i8
ldc.i8 2058
sub
conv.i
ldind.i4
ldc.i4 18
beq a01018
ldstr "a01018"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01018:
ldsflda int8 [rvastatic3]A::a01019
conv.i8
ldc.i8 8916
add
conv.i8
ldc.i8 8916
sub
conv.i
ldind.i1
ldc.i4 19
beq a01019
ldstr "a01019"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01019:
ldsflda int32 [rvastatic3]A::a01020
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 20
beq a01020
ldstr "a01020"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01020:
ldsflda int32 [rvastatic3]A::a01021
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 21
beq a01021
ldstr "a01021"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01021:
ldsflda int64 [rvastatic3]A::a01022
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 22
beq a01022
ldstr "a01022"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01022:
ldsflda int32 [rvastatic3]A::a01023
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 23
beq a01023
ldstr "a01023"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01023:
ldsflda int8 [rvastatic3]A::a01024
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 24
beq a01024
ldstr "a01024"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01024:
ldsflda int8 [rvastatic3]A::a01025
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 25
beq a01025
ldstr "a01025"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01025:
ldsflda int16 [rvastatic3]A::a01026
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i2
ldc.i4 26
beq a01026
ldstr "a01026"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01026:
ldsflda int8 [rvastatic3]A::a01027
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 27
beq a01027
ldstr "a01027"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01027:
ldsflda int16 [rvastatic3]A::a01028
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i2
ldc.i4 28
beq a01028
ldstr "a01028"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01028:
ldsflda int64 [rvastatic3]A::a01029
conv.i8
ldc.i8 877
add
conv.i8
ldc.i8 877
sub
conv.i
ldind.i8
ldc.i8 29
beq a01029
ldstr "a01029"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01029:
ldsflda int32 [rvastatic3]A::a01030
conv.i8
ldc.i8 47449
add
conv.i8
ldc.i8 47449
sub
conv.i
ldind.i4
ldc.i4 30
beq a01030
ldstr "a01030"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01030:
ldsflda int32 [rvastatic3]A::a01031
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 31
beq a01031
ldstr "a01031"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01031:
ldsflda int32 [rvastatic3]A::a01032
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 32
beq a01032
ldstr "a01032"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01032:
ldsflda int8 [rvastatic3]A::a01033
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 33
beq a01033
ldstr "a01033"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01033:
ldsflda int16 [rvastatic3]A::a01034
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i2
ldc.i4 34
beq a01034
ldstr "a01034"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01034:
ldsflda int32 [rvastatic3]A::a01035
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i4
ldc.i4 35
beq a01035
ldstr "a01035"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01035:
ldsflda int32 [rvastatic3]A::a01036
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 36
beq a01036
ldstr "a01036"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01036:
ldsflda int16 [rvastatic3]A::a01037
conv.i8
ldc.i8 25670
add
conv.i8
ldc.i8 25670
sub
conv.i
ldind.i2
ldc.i4 37
beq a01037
ldstr "a01037"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01037:
ldsflda float32 [rvastatic3]A::a01038
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 38.0
beq a01038
ldstr "a01038"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01038:
ldsflda int8 [rvastatic3]A::a01039
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i1
ldc.i4 39
beq a01039
ldstr "a01039"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01039:
ldsflda int8 [rvastatic3]A::a01040
conv.i8
ldc.i8 63628
add
conv.i8
ldc.i8 63628
sub
conv.i
ldind.i1
ldc.i4 40
beq a01040
ldstr "a01040"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01040:
ldsflda float32 [rvastatic3]A::a01041
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.r4
ldc.r4 41.0
beq a01041
ldstr "a01041"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01041:
ldsflda int32 [rvastatic3]A::a01042
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 42
beq a01042
ldstr "a01042"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01042:
ldsflda int32 [rvastatic3]A::a01043
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 43
beq a01043
ldstr "a01043"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01043:
ldsflda int32 [rvastatic3]A::a01044
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 44
beq a01044
ldstr "a01044"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01044:
ldsflda int64 [rvastatic3]A::a01045
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i8
ldc.i8 45
beq a01045
ldstr "a01045"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01045:
ldsflda int64 [rvastatic3]A::a01046
conv.i8
ldc.i8 53032
add
conv.i8
ldc.i8 53032
sub
conv.i
ldind.i8
ldc.i8 46
beq a01046
ldstr "a01046"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01046:
ldsflda int64 [rvastatic3]A::a01047
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i8
ldc.i8 47
beq a01047
ldstr "a01047"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01047:
ldsflda float32 [rvastatic3]A::a01048
conv.i8
ldc.i8 48333
add
conv.i8
ldc.i8 48333
sub
conv.i
ldind.r4
ldc.r4 48.0
beq a01048
ldstr "a01048"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01048:
ldsflda int64 [rvastatic3]A::a01049
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i8
ldc.i8 49
beq a01049
ldstr "a01049"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01049:
ldsflda int32 [rvastatic3]A::a01050
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 50
beq a01050
ldstr "a01050"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01050:
ldsflda float32 [rvastatic3]A::a01051
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.r4
ldc.r4 51.0
beq a01051
ldstr "a01051"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01051:
ldsflda int32 [rvastatic3]A::a01052
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 52
beq a01052
ldstr "a01052"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01052:
ldsflda int64 [rvastatic3]A::a01053
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i8
ldc.i8 53
beq a01053
ldstr "a01053"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01053:
ldsflda int8 [rvastatic3]A::a01054
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i1
ldc.i4 54
beq a01054
ldstr "a01054"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01054:
ldsflda int8 [rvastatic3]A::a01055
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 55
beq a01055
ldstr "a01055"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01055:
ldsflda float32 [rvastatic3]A::a01056
conv.i8
ldc.i8 4395
add
conv.i8
ldc.i8 4395
sub
conv.i
ldind.r4
ldc.r4 56.0
beq a01056
ldstr "a01056"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01056:
ldsflda int32 [rvastatic3]A::a01057
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i4
ldc.i4 57
beq a01057
ldstr "a01057"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01057:
ldsflda int64 [rvastatic3]A::a01058
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i8
ldc.i8 58
beq a01058
ldstr "a01058"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01058:
ldsflda int64 [rvastatic3]A::a01059
conv.i8
ldc.i8 18075
add
conv.i8
ldc.i8 18075
sub
conv.i
ldind.i8
ldc.i8 59
beq a01059
ldstr "a01059"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01059:
ldsflda int8 [rvastatic3]A::a01060
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 60
beq a01060
ldstr "a01060"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01060:
ldsflda int16 [rvastatic3]A::a01061
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i2
ldc.i4 61
beq a01061
ldstr "a01061"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01061:
ldsflda int64 [rvastatic3]A::a01062
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i8
ldc.i8 62
beq a01062
ldstr "a01062"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01062:
ldsflda float32 [rvastatic3]A::a01063
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.r4
ldc.r4 63.0
beq a01063
ldstr "a01063"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01063:
ldsflda int64 [rvastatic3]A::a01064
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 64
beq a01064
ldstr "a01064"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01064:
ldsflda float32 [rvastatic3]A::a01065
conv.i8
ldc.i8 46752
add
conv.i8
ldc.i8 46752
sub
conv.i
ldind.r4
ldc.r4 65.0
beq a01065
ldstr "a01065"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01065:
ldsflda int8 [rvastatic3]A::a01066
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 66
beq a01066
ldstr "a01066"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01066:
ldsflda int16 [rvastatic3]A::a01067
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 67
beq a01067
ldstr "a01067"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01067:
ldsflda int64 [rvastatic3]A::a01068
conv.i8
dup
dup
xor
xor
conv.i
ldind.i8
ldc.i8 68
beq a01068
ldstr "a01068"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01068:
ldsflda int64 [rvastatic3]A::a01069
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i8
ldc.i8 69
beq a01069
ldstr "a01069"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01069:
ldsflda int64 [rvastatic3]A::a01070
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 70
beq a01070
ldstr "a01070"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01070:
ldsflda int32 [rvastatic3]A::a01071
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i4
ldc.i4 71
beq a01071
ldstr "a01071"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01071:
ldsflda int8 [rvastatic3]A::a01072
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i1
ldc.i4 72
beq a01072
ldstr "a01072"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01072:
ldsflda int32 [rvastatic3]A::a01073
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i4
ldc.i4 73
beq a01073
ldstr "a01073"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01073:
ldsflda float32 [rvastatic3]A::a01074
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.r4
ldc.r4 74.0
beq a01074
ldstr "a01074"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01074:
ldsflda int64 [rvastatic3]A::a01075
conv.i8
dup
dup
xor
xor
conv.i
ldind.i8
ldc.i8 75
beq a01075
ldstr "a01075"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01075:
ldsflda int8 [rvastatic3]A::a01076
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 76
beq a01076
ldstr "a01076"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01076:
ldsflda int8 [rvastatic3]A::a01077
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i1
ldc.i4 77
beq a01077
ldstr "a01077"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01077:
ldsflda int8 [rvastatic3]A::a01078
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 78
beq a01078
ldstr "a01078"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01078:
ldsflda int32 [rvastatic3]A::a01079
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 79
beq a01079
ldstr "a01079"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01079:
ldsflda int16 [rvastatic3]A::a01080
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 80
beq a01080
ldstr "a01080"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01080:
ldsflda int32 [rvastatic3]A::a01081
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i4
ldc.i4 81
beq a01081
ldstr "a01081"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01081:
ldsflda int8 [rvastatic3]A::a01082
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 82
beq a01082
ldstr "a01082"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01082:
ldsflda int32 [rvastatic3]A::a01083
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 83
beq a01083
ldstr "a01083"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01083:
ldsflda int8 [rvastatic3]A::a01084
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i1
ldc.i4 84
beq a01084
ldstr "a01084"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01084:
ldsflda int32 [rvastatic3]A::a01085
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 85
beq a01085
ldstr "a01085"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01085:
ldsflda int8 [rvastatic3]A::a01086
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 86
beq a01086
ldstr "a01086"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01086:
ldsflda int64 [rvastatic3]A::a01087
conv.i8
ldc.i8 13996
add
conv.i8
ldc.i8 13996
sub
conv.i
ldind.i8
ldc.i8 87
beq a01087
ldstr "a01087"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01087:
ldsflda int8 [rvastatic3]A::a01088
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 88
beq a01088
ldstr "a01088"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01088:
ldsflda int16 [rvastatic3]A::a01089
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i2
ldc.i4 89
beq a01089
ldstr "a01089"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01089:
ldsflda int64 [rvastatic3]A::a01090
conv.i8
ldc.i8 38561
add
conv.i8
ldc.i8 38561
sub
conv.i
ldind.i8
ldc.i8 90
beq a01090
ldstr "a01090"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01090:
ldsflda int8 [rvastatic3]A::a01091
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.i1
ldc.i4 91
beq a01091
ldstr "a01091"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01091:
ldsflda int64 [rvastatic3]A::a01092
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i8
ldc.i8 92
beq a01092
ldstr "a01092"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01092:
ldsflda int16 [rvastatic3]A::a01093
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i2
ldc.i4 93
beq a01093
ldstr "a01093"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01093:
ldsflda int8 [rvastatic3]A::a01094
conv.i8
ldc.i8 16915
add
conv.i8
ldc.i8 16915
sub
conv.i
ldind.i1
ldc.i4 94
beq a01094
ldstr "a01094"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01094:
ldsflda float32 [rvastatic3]A::a01095
conv.i8
dup
dup
xor
xor
conv.i
ldind.r4
ldc.r4 95.0
beq a01095
ldstr "a01095"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01095:
ldsflda int16 [rvastatic3]A::a01096
conv.i8
dup
dup
xor
xor
conv.i
ldind.i2
ldc.i4 96
beq a01096
ldstr "a01096"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01096:
ldsflda int64 [rvastatic3]A::a01097
conv.i8
ldc.i8 53815
add
conv.i8
ldc.i8 53815
sub
conv.i
ldind.i8
ldc.i8 97
beq a01097
ldstr "a01097"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01097:
ldsflda float32 [rvastatic3]A::a01098
conv.i8
dup
ldc.i8 0xffffffff00000000
and
ldc.i4 32
shr.un
conv.i8
ldc.i4 32
shl
or
conv.i
ldind.r4
ldc.r4 98.0
beq a01098
ldstr "a01098"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01098:
ldsflda int32 [rvastatic3]A::a01099
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 99
beq a01099
ldstr "a01099"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01099:
ldsflda int32 [rvastatic3]A::a010100
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i4
ldc.i4 100
beq a010100
ldstr "a010100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010100:
ldsflda int32 [rvastatic3]A::a010101
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i4
ldc.i4 101
beq a010101
ldstr "a010101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010101:
ldsflda int8 [rvastatic3]A::a010102
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 102
beq a010102
ldstr "a010102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010102:
ldsflda int16 [rvastatic3]A::a010103
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i2
ldc.i4 103
beq a010103
ldstr "a010103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010103:
ldsflda int8 [rvastatic3]A::a010104
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i1
ldc.i4 104
beq a010104
ldstr "a010104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010104:
ldsflda float32 [rvastatic3]A::a010105
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 105.0
beq a010105
ldstr "a010105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010105:
ldsflda float32 [rvastatic3]A::a010106
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.r4
ldc.r4 106.0
beq a010106
ldstr "a010106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010106:
ldsflda int8 [rvastatic3]A::a010107
conv.i8
dup
dup
xor
xor
conv.i
ldind.i1
ldc.i4 107
beq a010107
ldstr "a010107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010107:
ldsflda float32 [rvastatic3]A::a010108
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.r4
ldc.r4 108.0
beq a010108
ldstr "a010108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010108:
ldsflda int16 [rvastatic3]A::a010109
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i2
ldc.i4 109
beq a010109
ldstr "a010109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010109:
ldsflda int8 [rvastatic3]A::a010110
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i1
ldc.i4 110
beq a010110
ldstr "a010110"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010110:
ldsflda int64 [rvastatic3]A::a010111
conv.i8
dup
dup
xor
xor
conv.i
ldind.i8
ldc.i8 111
beq a010111
ldstr "a010111"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010111:
ldsflda int16 [rvastatic3]A::a010112
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i2
ldc.i4 112
beq a010112
ldstr "a010112"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010112:
ldsflda int16 [rvastatic3]A::a010113
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i2
ldc.i4 113
beq a010113
ldstr "a010113"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010113:
ldsflda int32 [rvastatic3]A::a010114
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i4
ldc.i4 114
beq a010114
ldstr "a010114"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010114:
ldsflda int32 [rvastatic3]A::a010115
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 115
beq a010115
ldstr "a010115"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010115:
ldsflda int8 [rvastatic3]A::a010116
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 116
beq a010116
ldstr "a010116"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010116:
ldsflda int16 [rvastatic3]A::a010117
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i2
ldc.i4 117
beq a010117
ldstr "a010117"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010117:
ldsflda int64 [rvastatic3]A::a010118
conv.i8
ldc.i8 2
mul
ldc.i4 1
shr.un
conv.i
ldind.i8
ldc.i8 118
beq a010118
ldstr "a010118"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010118:
ldsflda int8 [rvastatic3]A::a010119
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 119
beq a010119
ldstr "a010119"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010119:
ldsflda int64 [rvastatic3]A::a010120
conv.r8
ldc.r8 234.098
add
conv.r8
ldc.r8 -234.098
add
conv.i
ldind.i8
ldc.i8 120
beq a010120
ldstr "a010120"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010120:
ldsflda int32 [rvastatic3]A::a010121
conv.i8
dup
dup
xor
xor
conv.i
ldind.i4
ldc.i4 121
beq a010121
ldstr "a010121"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010121:
ldsflda int8 [rvastatic3]A::a010122
conv.i8
ldc.i8 51550
add
conv.i8
ldc.i8 51550
sub
conv.i
ldind.i1
ldc.i4 2
beq a010122
ldstr "a010122"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010122:
ldsflda int32 [rvastatic3]A::a010123
conv.i8
dup
ldc.i8 0x0000000000ffffff
and
ldc.i4 32
shl
conv.i8
ldc.i4 32
shr.un
or
conv.i
ldind.i4
ldc.i4 123
beq a010123
ldstr "a010123"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010123:
ldsflda int8 [rvastatic3]A::a010124
conv.i8
conv.r8
conv.u8
conv.i
conv.i8
conv.r8
conv.u8
conv.i
ldind.i1
ldc.i4 4
beq a010124
ldstr "a010124"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010124:
ldsflda int64 [rvastatic3]A::a010125
conv.i8
ldc.i8 47464
add
conv.i8
ldc.i8 47464
sub
conv.i
ldind.i8
ldc.i8 125
beq a010125
ldstr "a010125"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010125:
ldsflda float32 [rvastatic3]A::a010126
conv.i8
ldc.i8 24077
add
conv.i8
ldc.i8 24077
sub
conv.i
ldind.r4
ldc.r4 126.0
beq a010126
ldstr "a010126"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010126:
ldsflda int64 [rvastatic3]A::a010127
conv.i8
ldc.i8 28583
add
conv.i8
ldc.i8 28583
sub
conv.i
ldind.i8
ldc.i8 127
beq a010127
ldstr "a010127"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010127:
ret}
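// V5 (sketch of intent, inferred from the generated IL below): for each RVA static field,
// take its address with ldsflda, convert it to int64 or float64, pass it through
// A::Call1(int64) / A::Call2(float64), dereference the returned native int, and verify the
// expected field value, throwing System.Exception with the field name on mismatch.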
.method static void V5() {.maxstack 50
ldsflda int64 [rvastatic3]A::a0100
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 0
beq a0100
ldstr "a0100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0100:
ldsflda int64 [rvastatic3]A::a0101
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 1
beq a0101
ldstr "a0101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0101:
ldsflda float32 [rvastatic3]A::a0102
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.r4
ldc.r4 2.0
beq a0102
ldstr "a0102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0102:
ldsflda int64 [rvastatic3]A::a0103
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 3
beq a0103
ldstr "a0103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0103:
ldsflda int64 [rvastatic3]A::a0104
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 4
beq a0104
ldstr "a0104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0104:
ldsflda int16 [rvastatic3]A::a0105
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 5
beq a0105
ldstr "a0105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0105:
ldsflda int16 [rvastatic3]A::a0106
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 6
beq a0106
ldstr "a0106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0106:
ldsflda float32 [rvastatic3]A::a0107
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 7.0
beq a0107
ldstr "a0107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0107:
ldsflda int32 [rvastatic3]A::a0108
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 8
beq a0108
ldstr "a0108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0108:
ldsflda int8 [rvastatic3]A::a0109
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 9
beq a0109
ldstr "a0109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a0109:
ldsflda int32 [rvastatic3]A::a01010
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 10
beq a01010
ldstr "a01010"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01010:
ldsflda int64 [rvastatic3]A::a01011
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 11
beq a01011
ldstr "a01011"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01011:
ldsflda int32 [rvastatic3]A::a01012
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 12
beq a01012
ldstr "a01012"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01012:
ldsflda int8 [rvastatic3]A::a01013
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 13
beq a01013
ldstr "a01013"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01013:
ldsflda int16 [rvastatic3]A::a01014
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 14
beq a01014
ldstr "a01014"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01014:
ldsflda int16 [rvastatic3]A::a01015
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 15
beq a01015
ldstr "a01015"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01015:
ldsflda float32 [rvastatic3]A::a01016
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 16.0
beq a01016
ldstr "a01016"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01016:
ldsflda float32 [rvastatic3]A::a01017
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.r4
ldc.r4 17.0
beq a01017
ldstr "a01017"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01017:
ldsflda int32 [rvastatic3]A::a01018
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 18
beq a01018
ldstr "a01018"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01018:
ldsflda int8 [rvastatic3]A::a01019
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 19
beq a01019
ldstr "a01019"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01019:
ldsflda int32 [rvastatic3]A::a01020
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 20
beq a01020
ldstr "a01020"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01020:
ldsflda int32 [rvastatic3]A::a01021
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 21
beq a01021
ldstr "a01021"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01021:
ldsflda int64 [rvastatic3]A::a01022
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 22
beq a01022
ldstr "a01022"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01022:
ldsflda int32 [rvastatic3]A::a01023
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 23
beq a01023
ldstr "a01023"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01023:
ldsflda int8 [rvastatic3]A::a01024
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 24
beq a01024
ldstr "a01024"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01024:
ldsflda int8 [rvastatic3]A::a01025
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 25
beq a01025
ldstr "a01025"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01025:
ldsflda int16 [rvastatic3]A::a01026
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 26
beq a01026
ldstr "a01026"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01026:
ldsflda int8 [rvastatic3]A::a01027
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 27
beq a01027
ldstr "a01027"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01027:
ldsflda int16 [rvastatic3]A::a01028
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 28
beq a01028
ldstr "a01028"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01028:
ldsflda int64 [rvastatic3]A::a01029
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 29
beq a01029
ldstr "a01029"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01029:
ldsflda int32 [rvastatic3]A::a01030
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 30
beq a01030
ldstr "a01030"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01030:
ldsflda int32 [rvastatic3]A::a01031
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 31
beq a01031
ldstr "a01031"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01031:
ldsflda int32 [rvastatic3]A::a01032
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 32
beq a01032
ldstr "a01032"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01032:
ldsflda int8 [rvastatic3]A::a01033
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 33
beq a01033
ldstr "a01033"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01033:
ldsflda int16 [rvastatic3]A::a01034
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 34
beq a01034
ldstr "a01034"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01034:
ldsflda int32 [rvastatic3]A::a01035
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 35
beq a01035
ldstr "a01035"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01035:
ldsflda int32 [rvastatic3]A::a01036
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 36
beq a01036
ldstr "a01036"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01036:
ldsflda int16 [rvastatic3]A::a01037
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 37
beq a01037
ldstr "a01037"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01037:
ldsflda float32 [rvastatic3]A::a01038
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 38.0
beq a01038
ldstr "a01038"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01038:
ldsflda int8 [rvastatic3]A::a01039
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 39
beq a01039
ldstr "a01039"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01039:
ldsflda int8 [rvastatic3]A::a01040
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 40
beq a01040
ldstr "a01040"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01040:
ldsflda float32 [rvastatic3]A::a01041
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 41.0
beq a01041
ldstr "a01041"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01041:
ldsflda int32 [rvastatic3]A::a01042
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 42
beq a01042
ldstr "a01042"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01042:
ldsflda int32 [rvastatic3]A::a01043
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 43
beq a01043
ldstr "a01043"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01043:
ldsflda int32 [rvastatic3]A::a01044
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 44
beq a01044
ldstr "a01044"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01044:
ldsflda int64 [rvastatic3]A::a01045
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 45
beq a01045
ldstr "a01045"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01045:
ldsflda int64 [rvastatic3]A::a01046
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 46
beq a01046
ldstr "a01046"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01046:
ldsflda int64 [rvastatic3]A::a01047
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 47
beq a01047
ldstr "a01047"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01047:
ldsflda float32 [rvastatic3]A::a01048
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 48.0
beq a01048
ldstr "a01048"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01048:
ldsflda int64 [rvastatic3]A::a01049
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 49
beq a01049
ldstr "a01049"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01049:
ldsflda int32 [rvastatic3]A::a01050
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 50
beq a01050
ldstr "a01050"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01050:
ldsflda float32 [rvastatic3]A::a01051
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 51.0
beq a01051
ldstr "a01051"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01051:
ldsflda int32 [rvastatic3]A::a01052
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 52
beq a01052
ldstr "a01052"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01052:
ldsflda int64 [rvastatic3]A::a01053
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 53
beq a01053
ldstr "a01053"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01053:
ldsflda int8 [rvastatic3]A::a01054
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 54
beq a01054
ldstr "a01054"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01054:
ldsflda int8 [rvastatic3]A::a01055
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 55
beq a01055
ldstr "a01055"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01055:
ldsflda float32 [rvastatic3]A::a01056
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 56.0
beq a01056
ldstr "a01056"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01056:
ldsflda int32 [rvastatic3]A::a01057
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 57
beq a01057
ldstr "a01057"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01057:
ldsflda int64 [rvastatic3]A::a01058
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 58
beq a01058
ldstr "a01058"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01058:
ldsflda int64 [rvastatic3]A::a01059
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 59
beq a01059
ldstr "a01059"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01059:
ldsflda int8 [rvastatic3]A::a01060
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 60
beq a01060
ldstr "a01060"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01060:
ldsflda int16 [rvastatic3]A::a01061
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 61
beq a01061
ldstr "a01061"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01061:
ldsflda int64 [rvastatic3]A::a01062
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 62
beq a01062
ldstr "a01062"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01062:
ldsflda float32 [rvastatic3]A::a01063
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 63.0
beq a01063
ldstr "a01063"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01063:
ldsflda int64 [rvastatic3]A::a01064
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 64
beq a01064
ldstr "a01064"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01064:
ldsflda float32 [rvastatic3]A::a01065
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 65.0
beq a01065
ldstr "a01065"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01065:
ldsflda int8 [rvastatic3]A::a01066
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 66
beq a01066
ldstr "a01066"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01066:
ldsflda int16 [rvastatic3]A::a01067
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 67
beq a01067
ldstr "a01067"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01067:
ldsflda int64 [rvastatic3]A::a01068
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 68
beq a01068
ldstr "a01068"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01068:
ldsflda int64 [rvastatic3]A::a01069
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 69
beq a01069
ldstr "a01069"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01069:
ldsflda int64 [rvastatic3]A::a01070
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 70
beq a01070
ldstr "a01070"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01070:
ldsflda int32 [rvastatic3]A::a01071
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 71
beq a01071
ldstr "a01071"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01071:
ldsflda int8 [rvastatic3]A::a01072
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 72
beq a01072
ldstr "a01072"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01072:
ldsflda int32 [rvastatic3]A::a01073
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 73
beq a01073
ldstr "a01073"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01073:
ldsflda float32 [rvastatic3]A::a01074
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 74.0
beq a01074
ldstr "a01074"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01074:
ldsflda int64 [rvastatic3]A::a01075
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 75
beq a01075
ldstr "a01075"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01075:
ldsflda int8 [rvastatic3]A::a01076
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 76
beq a01076
ldstr "a01076"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01076:
ldsflda int8 [rvastatic3]A::a01077
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 77
beq a01077
ldstr "a01077"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01077:
ldsflda int8 [rvastatic3]A::a01078
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 78
beq a01078
ldstr "a01078"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01078:
ldsflda int32 [rvastatic3]A::a01079
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 79
beq a01079
ldstr "a01079"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01079:
ldsflda int16 [rvastatic3]A::a01080
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 80
beq a01080
ldstr "a01080"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01080:
ldsflda int32 [rvastatic3]A::a01081
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 81
beq a01081
ldstr "a01081"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01081:
ldsflda int8 [rvastatic3]A::a01082
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 82
beq a01082
ldstr "a01082"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01082:
ldsflda int32 [rvastatic3]A::a01083
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 83
beq a01083
ldstr "a01083"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01083:
ldsflda int8 [rvastatic3]A::a01084
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 84
beq a01084
ldstr "a01084"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01084:
ldsflda int32 [rvastatic3]A::a01085
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 85
beq a01085
ldstr "a01085"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01085:
ldsflda int8 [rvastatic3]A::a01086
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 86
beq a01086
ldstr "a01086"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01086:
ldsflda int64 [rvastatic3]A::a01087
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 87
beq a01087
ldstr "a01087"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01087:
ldsflda int8 [rvastatic3]A::a01088
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 88
beq a01088
ldstr "a01088"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01088:
ldsflda int16 [rvastatic3]A::a01089
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 89
beq a01089
ldstr "a01089"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01089:
ldsflda int64 [rvastatic3]A::a01090
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 90
beq a01090
ldstr "a01090"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01090:
ldsflda int8 [rvastatic3]A::a01091
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 91
beq a01091
ldstr "a01091"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01091:
ldsflda int64 [rvastatic3]A::a01092
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 92
beq a01092
ldstr "a01092"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01092:
ldsflda int16 [rvastatic3]A::a01093
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 93
beq a01093
ldstr "a01093"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01093:
ldsflda int8 [rvastatic3]A::a01094
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 94
beq a01094
ldstr "a01094"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01094:
ldsflda float32 [rvastatic3]A::a01095
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 95.0
beq a01095
ldstr "a01095"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01095:
ldsflda int16 [rvastatic3]A::a01096
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 96
beq a01096
ldstr "a01096"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01096:
ldsflda int64 [rvastatic3]A::a01097
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 97
beq a01097
ldstr "a01097"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01097:
ldsflda float32 [rvastatic3]A::a01098
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 98.0
beq a01098
ldstr "a01098"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01098:
ldsflda int32 [rvastatic3]A::a01099
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 99
beq a01099
ldstr "a01099"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a01099:
ldsflda int32 [rvastatic3]A::a010100
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 100
beq a010100
ldstr "a010100"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010100:
ldsflda int32 [rvastatic3]A::a010101
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 101
beq a010101
ldstr "a010101"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010101:
ldsflda int8 [rvastatic3]A::a010102
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i1
ldc.i4 102
beq a010102
ldstr "a010102"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010102:
ldsflda int16 [rvastatic3]A::a010103
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 103
beq a010103
ldstr "a010103"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010103:
ldsflda int8 [rvastatic3]A::a010104
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 104
beq a010104
ldstr "a010104"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010104:
ldsflda float32 [rvastatic3]A::a010105
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 105.0
beq a010105
ldstr "a010105"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010105:
ldsflda float32 [rvastatic3]A::a010106
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 106.0
beq a010106
ldstr "a010106"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010106:
ldsflda int8 [rvastatic3]A::a010107
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 107
beq a010107
ldstr "a010107"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010107:
ldsflda float32 [rvastatic3]A::a010108
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 108.0
beq a010108
ldstr "a010108"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010108:
ldsflda int16 [rvastatic3]A::a010109
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 109
beq a010109
ldstr "a010109"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010109:
ldsflda int8 [rvastatic3]A::a010110
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 110
beq a010110
ldstr "a010110"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010110:
ldsflda int64 [rvastatic3]A::a010111
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 111
beq a010111
ldstr "a010111"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010111:
ldsflda int16 [rvastatic3]A::a010112
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 112
beq a010112
ldstr "a010112"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010112:
ldsflda int16 [rvastatic3]A::a010113
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i2
ldc.i4 113
beq a010113
ldstr "a010113"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010113:
ldsflda int32 [rvastatic3]A::a010114
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 114
beq a010114
ldstr "a010114"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010114:
ldsflda int32 [rvastatic3]A::a010115
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 115
beq a010115
ldstr "a010115"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010115:
ldsflda int8 [rvastatic3]A::a010116
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 116
beq a010116
ldstr "a010116"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010116:
ldsflda int16 [rvastatic3]A::a010117
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i2
ldc.i4 117
beq a010117
ldstr "a010117"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010117:
ldsflda int64 [rvastatic3]A::a010118
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 118
beq a010118
ldstr "a010118"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010118:
ldsflda int8 [rvastatic3]A::a010119
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 119
beq a010119
ldstr "a010119"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010119:
ldsflda int64 [rvastatic3]A::a010120
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 120
beq a010120
ldstr "a010120"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010120:
ldsflda int32 [rvastatic3]A::a010121
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i4
ldc.i4 121
beq a010121
ldstr "a010121"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010121:
ldsflda int8 [rvastatic3]A::a010122
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 2
beq a010122
ldstr "a010122"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010122:
ldsflda int32 [rvastatic3]A::a010123
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i4
ldc.i4 123
beq a010123
ldstr "a010123"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010123:
ldsflda int8 [rvastatic3]A::a010124
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i1
ldc.i4 4
beq a010124
ldstr "a010124"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010124:
ldsflda int64 [rvastatic3]A::a010125
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.i8
ldc.i8 125
beq a010125
ldstr "a010125"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010125:
ldsflda float32 [rvastatic3]A::a010126
conv.r8
call native int [rvastatic3]A::Call2(float64)
ldind.r4
ldc.r4 126.0
beq a010126
ldstr "a010126"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010126:
ldsflda int64 [rvastatic3]A::a010127
conv.i8
call native int [rvastatic3]A::Call1(int64)
ldind.i8
ldc.i8 127
beq a010127
ldstr "a010127"
newobj instance void [mscorlib]System.Exception::.ctor(string)
throw
a010127:
ret}
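// V6 (sketch of intent, inferred from the generated IL below): for each RVA static field,
// load it with ldsfld, add 1, store it back with stsfld, then reload and verify the
// incremented value, throwing System.Exception on mismatch.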
.method static void V6() {.maxstack 50
ldsfld int64 [rvastatic3]A::a0100
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0100
ldsfld int64 [rvastatic3]A::a0100
ldc.i8 1
beq a0100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0100:
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0101
ldsfld int64 [rvastatic3]A::a0101
ldc.i8 2
beq a0101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0101:
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a0102
ldsfld float32 [rvastatic3]A::a0102
ldc.r4 3.0
beq a0102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0102:
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0103
ldsfld int64 [rvastatic3]A::a0103
ldc.i8 4
beq a0103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0103:
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a0104
ldsfld int64 [rvastatic3]A::a0104
ldc.i8 5
beq a0104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0104:
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a0105
ldsfld int16 [rvastatic3]A::a0105
ldc.i4 6
beq a0105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0105:
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a0106
ldsfld int16 [rvastatic3]A::a0106
ldc.i4 7
beq a0106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0106:
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a0107
ldsfld float32 [rvastatic3]A::a0107
ldc.r4 8.0
beq a0107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0107:
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a0108
ldsfld int32 [rvastatic3]A::a0108
ldc.i4 9
beq a0108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0108:
ldsfld int8 [rvastatic3]A::a0109
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a0109
ldsfld int8 [rvastatic3]A::a0109
ldc.i4 10
beq a0109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a0109:
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01010
ldsfld int32 [rvastatic3]A::a01010
ldc.i4 11
beq a01010
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01010:
ldsfld int64 [rvastatic3]A::a01011
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01011
ldsfld int64 [rvastatic3]A::a01011
ldc.i8 12
beq a01011
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01011:
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01012
ldsfld int32 [rvastatic3]A::a01012
ldc.i4 13
beq a01012
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01012:
ldsfld int8 [rvastatic3]A::a01013
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01013
ldsfld int8 [rvastatic3]A::a01013
ldc.i4 14
beq a01013
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01013:
ldsfld int16 [rvastatic3]A::a01014
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01014
ldsfld int16 [rvastatic3]A::a01014
ldc.i4 15
beq a01014
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01014:
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01015
ldsfld int16 [rvastatic3]A::a01015
ldc.i4 16
beq a01015
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01015:
ldsfld float32 [rvastatic3]A::a01016
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01016
ldsfld float32 [rvastatic3]A::a01016
ldc.r4 17.0
beq a01016
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01016:
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01017
ldsfld float32 [rvastatic3]A::a01017
ldc.r4 18.0
beq a01017
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01017:
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01018
ldsfld int32 [rvastatic3]A::a01018
ldc.i4 19
beq a01018
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01018:
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01019
ldsfld int8 [rvastatic3]A::a01019
ldc.i4 20
beq a01019
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01019:
ldsfld int32 [rvastatic3]A::a01020
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01020
ldsfld int32 [rvastatic3]A::a01020
ldc.i4 21
beq a01020
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01020:
ldsfld int32 [rvastatic3]A::a01021
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01021
ldsfld int32 [rvastatic3]A::a01021
ldc.i4 22
beq a01021
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01021:
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01022
ldsfld int64 [rvastatic3]A::a01022
ldc.i8 23
beq a01022
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01022:
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01023
ldsfld int32 [rvastatic3]A::a01023
ldc.i4 24
beq a01023
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01023:
ldsfld int8 [rvastatic3]A::a01024
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01024
ldsfld int8 [rvastatic3]A::a01024
ldc.i4 25
beq a01024
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01024:
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01025
ldsfld int8 [rvastatic3]A::a01025
ldc.i4 26
beq a01025
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01025:
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01026
ldsfld int16 [rvastatic3]A::a01026
ldc.i4 27
beq a01026
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01026:
ldsfld int8 [rvastatic3]A::a01027
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01027
ldsfld int8 [rvastatic3]A::a01027
ldc.i4 28
beq a01027
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01027:
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01028
ldsfld int16 [rvastatic3]A::a01028
ldc.i4 29
beq a01028
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01028:
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01029
ldsfld int64 [rvastatic3]A::a01029
ldc.i8 30
beq a01029
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01029:
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01030
ldsfld int32 [rvastatic3]A::a01030
ldc.i4 31
beq a01030
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01030:
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01031
ldsfld int32 [rvastatic3]A::a01031
ldc.i4 32
beq a01031
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01031:
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01032
ldsfld int32 [rvastatic3]A::a01032
ldc.i4 33
beq a01032
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01032:
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01033
ldsfld int8 [rvastatic3]A::a01033
ldc.i4 34
beq a01033
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01033:
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01034
ldsfld int16 [rvastatic3]A::a01034
ldc.i4 35
beq a01034
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01034:
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01035
ldsfld int32 [rvastatic3]A::a01035
ldc.i4 36
beq a01035
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01035:
ldsfld int32 [rvastatic3]A::a01036
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01036
ldsfld int32 [rvastatic3]A::a01036
ldc.i4 37
beq a01036
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01036:
ldsfld int16 [rvastatic3]A::a01037
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01037
ldsfld int16 [rvastatic3]A::a01037
ldc.i4 38
beq a01037
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01037:
ldsfld float32 [rvastatic3]A::a01038
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01038
ldsfld float32 [rvastatic3]A::a01038
ldc.r4 39.0
beq a01038
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01038:
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01039
ldsfld int8 [rvastatic3]A::a01039
ldc.i4 40
beq a01039
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01039:
ldsfld int8 [rvastatic3]A::a01040
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01040
ldsfld int8 [rvastatic3]A::a01040
ldc.i4 41
beq a01040
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01040:
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01041
ldsfld float32 [rvastatic3]A::a01041
ldc.r4 42.0
beq a01041
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01041:
ldsfld int32 [rvastatic3]A::a01042
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01042
ldsfld int32 [rvastatic3]A::a01042
ldc.i4 43
beq a01042
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01042:
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01043
ldsfld int32 [rvastatic3]A::a01043
ldc.i4 44
beq a01043
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01043:
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01044
ldsfld int32 [rvastatic3]A::a01044
ldc.i4 45
beq a01044
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01044:
ldsfld int64 [rvastatic3]A::a01045
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01045
ldsfld int64 [rvastatic3]A::a01045
ldc.i8 46
beq a01045
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01045:
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01046
ldsfld int64 [rvastatic3]A::a01046
ldc.i8 47
beq a01046
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01046:
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01047
ldsfld int64 [rvastatic3]A::a01047
ldc.i8 48
beq a01047
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01047:
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01048
ldsfld float32 [rvastatic3]A::a01048
ldc.r4 49.0
beq a01048
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01048:
ldsfld int64 [rvastatic3]A::a01049
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01049
ldsfld int64 [rvastatic3]A::a01049
ldc.i8 50
beq a01049
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01049:
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01050
ldsfld int32 [rvastatic3]A::a01050
ldc.i4 51
beq a01050
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01050:
ldsfld float32 [rvastatic3]A::a01051
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01051
ldsfld float32 [rvastatic3]A::a01051
ldc.r4 52.0
beq a01051
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01051:
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01052
ldsfld int32 [rvastatic3]A::a01052
ldc.i4 53
beq a01052
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01052:
ldsfld int64 [rvastatic3]A::a01053
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01053
ldsfld int64 [rvastatic3]A::a01053
ldc.i8 54
beq a01053
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01053:
ldsfld int8 [rvastatic3]A::a01054
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01054
ldsfld int8 [rvastatic3]A::a01054
ldc.i4 55
beq a01054
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01054:
ldsfld int8 [rvastatic3]A::a01055
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01055
ldsfld int8 [rvastatic3]A::a01055
ldc.i4 56
beq a01055
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01055:
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01056
ldsfld float32 [rvastatic3]A::a01056
ldc.r4 57.0
beq a01056
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01056:
ldsfld int32 [rvastatic3]A::a01057
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01057
ldsfld int32 [rvastatic3]A::a01057
ldc.i4 58
beq a01057
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01057:
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01058
ldsfld int64 [rvastatic3]A::a01058
ldc.i8 59
beq a01058
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01058:
ldsfld int64 [rvastatic3]A::a01059
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01059
ldsfld int64 [rvastatic3]A::a01059
ldc.i8 60
beq a01059
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01059:
ldsfld int8 [rvastatic3]A::a01060
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01060
ldsfld int8 [rvastatic3]A::a01060
ldc.i4 61
beq a01060
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01060:
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01061
ldsfld int16 [rvastatic3]A::a01061
ldc.i4 62
beq a01061
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01061:
ldsfld int64 [rvastatic3]A::a01062
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01062
ldsfld int64 [rvastatic3]A::a01062
ldc.i8 63
beq a01062
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01062:
ldsfld float32 [rvastatic3]A::a01063
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01063
ldsfld float32 [rvastatic3]A::a01063
ldc.r4 64.0
beq a01063
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01063:
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01064
ldsfld int64 [rvastatic3]A::a01064
ldc.i8 65
beq a01064
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01064:
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01065
ldsfld float32 [rvastatic3]A::a01065
ldc.r4 66.0
beq a01065
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01065:
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01066
ldsfld int8 [rvastatic3]A::a01066
ldc.i4 67
beq a01066
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01066:
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01067
ldsfld int16 [rvastatic3]A::a01067
ldc.i4 68
beq a01067
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01067:
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01068
ldsfld int64 [rvastatic3]A::a01068
ldc.i8 69
beq a01068
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01068:
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01069
ldsfld int64 [rvastatic3]A::a01069
ldc.i8 70
beq a01069
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01069:
ldsfld int64 [rvastatic3]A::a01070
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01070
ldsfld int64 [rvastatic3]A::a01070
ldc.i8 71
beq a01070
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01070:
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01071
ldsfld int32 [rvastatic3]A::a01071
ldc.i4 72
beq a01071
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01071:
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01072
ldsfld int8 [rvastatic3]A::a01072
ldc.i4 73
beq a01072
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01072:
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01073
ldsfld int32 [rvastatic3]A::a01073
ldc.i4 74
beq a01073
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01073:
ldsfld float32 [rvastatic3]A::a01074
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01074
ldsfld float32 [rvastatic3]A::a01074
ldc.r4 75.0
beq a01074
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01074:
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01075
ldsfld int64 [rvastatic3]A::a01075
ldc.i8 76
beq a01075
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01075:
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01076
ldsfld int8 [rvastatic3]A::a01076
ldc.i4 77
beq a01076
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01076:
ldsfld int8 [rvastatic3]A::a01077
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01077
ldsfld int8 [rvastatic3]A::a01077
ldc.i4 78
beq a01077
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01077:
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01078
ldsfld int8 [rvastatic3]A::a01078
ldc.i4 79
beq a01078
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01078:
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01079
ldsfld int32 [rvastatic3]A::a01079
ldc.i4 80
beq a01079
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01079:
ldsfld int16 [rvastatic3]A::a01080
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01080
ldsfld int16 [rvastatic3]A::a01080
ldc.i4 81
beq a01080
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01080:
ldsfld int32 [rvastatic3]A::a01081
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01081
ldsfld int32 [rvastatic3]A::a01081
ldc.i4 82
beq a01081
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01081:
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01082
ldsfld int8 [rvastatic3]A::a01082
ldc.i4 83
beq a01082
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01082:
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01083
ldsfld int32 [rvastatic3]A::a01083
ldc.i4 84
beq a01083
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01083:
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01084
ldsfld int8 [rvastatic3]A::a01084
ldc.i4 85
beq a01084
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01084:
ldsfld int32 [rvastatic3]A::a01085
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01085
ldsfld int32 [rvastatic3]A::a01085
ldc.i4 86
beq a01085
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01085:
ldsfld int8 [rvastatic3]A::a01086
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01086
ldsfld int8 [rvastatic3]A::a01086
ldc.i4 87
beq a01086
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01086:
ldsfld int64 [rvastatic3]A::a01087
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01087
ldsfld int64 [rvastatic3]A::a01087
ldc.i8 88
beq a01087
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01087:
ldsfld int8 [rvastatic3]A::a01088
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01088
ldsfld int8 [rvastatic3]A::a01088
ldc.i4 89
beq a01088
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01088:
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01089
ldsfld int16 [rvastatic3]A::a01089
ldc.i4 90
beq a01089
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01089:
ldsfld int64 [rvastatic3]A::a01090
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01090
ldsfld int64 [rvastatic3]A::a01090
ldc.i8 91
beq a01090
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01090:
ldsfld int8 [rvastatic3]A::a01091
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01091
ldsfld int8 [rvastatic3]A::a01091
ldc.i4 92
beq a01091
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01091:
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01092
ldsfld int64 [rvastatic3]A::a01092
ldc.i8 93
beq a01092
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01092:
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01093
ldsfld int16 [rvastatic3]A::a01093
ldc.i4 94
beq a01093
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01093:
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a01094
ldsfld int8 [rvastatic3]A::a01094
ldc.i4 95
beq a01094
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01094:
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01095
ldsfld float32 [rvastatic3]A::a01095
ldc.r4 96.0
beq a01095
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01095:
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a01096
ldsfld int16 [rvastatic3]A::a01096
ldc.i4 97
beq a01096
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01096:
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a01097
ldsfld int64 [rvastatic3]A::a01097
ldc.i8 98
beq a01097
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01097:
ldsfld float32 [rvastatic3]A::a01098
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a01098
ldsfld float32 [rvastatic3]A::a01098
ldc.r4 99.0
beq a01098
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01098:
ldsfld int32 [rvastatic3]A::a01099
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a01099
ldsfld int32 [rvastatic3]A::a01099
ldc.i4 100
beq a01099
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a01099:
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010100
ldsfld int32 [rvastatic3]A::a010100
ldc.i4 101
beq a010100
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010100:
ldsfld int32 [rvastatic3]A::a010101
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010101
ldsfld int32 [rvastatic3]A::a010101
ldc.i4 102
beq a010101
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010101:
ldsfld int8 [rvastatic3]A::a010102
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010102
ldsfld int8 [rvastatic3]A::a010102
ldc.i4 103
beq a010102
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010102:
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010103
ldsfld int16 [rvastatic3]A::a010103
ldc.i4 104
beq a010103
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010103:
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010104
ldsfld int8 [rvastatic3]A::a010104
ldc.i4 105
beq a010104
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010104:
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010105
ldsfld float32 [rvastatic3]A::a010105
ldc.r4 106.0
beq a010105
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010105:
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010106
ldsfld float32 [rvastatic3]A::a010106
ldc.r4 107.0
beq a010106
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010106:
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010107
ldsfld int8 [rvastatic3]A::a010107
ldc.i4 108
beq a010107
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010107:
ldsfld float32 [rvastatic3]A::a010108
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010108
ldsfld float32 [rvastatic3]A::a010108
ldc.r4 109.0
beq a010108
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010108:
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010109
ldsfld int16 [rvastatic3]A::a010109
ldc.i4 110
beq a010109
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010109:
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010110
ldsfld int8 [rvastatic3]A::a010110
ldc.i4 111
beq a010110
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010110:
ldsfld int64 [rvastatic3]A::a010111
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010111
ldsfld int64 [rvastatic3]A::a010111
ldc.i8 112
beq a010111
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010111:
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010112
ldsfld int16 [rvastatic3]A::a010112
ldc.i4 113
beq a010112
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010112:
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010113
ldsfld int16 [rvastatic3]A::a010113
ldc.i4 114
beq a010113
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010113:
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010114
ldsfld int32 [rvastatic3]A::a010114
ldc.i4 115
beq a010114
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010114:
ldsfld int32 [rvastatic3]A::a010115
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010115
ldsfld int32 [rvastatic3]A::a010115
ldc.i4 116
beq a010115
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010115:
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010116
ldsfld int8 [rvastatic3]A::a010116
ldc.i4 117
beq a010116
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010116:
ldsfld int16 [rvastatic3]A::a010117
ldc.i4 1
add
stsfld int16 [rvastatic3]A::a010117
ldsfld int16 [rvastatic3]A::a010117
ldc.i4 118
beq a010117
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010117:
ldsfld int64 [rvastatic3]A::a010118
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010118
ldsfld int64 [rvastatic3]A::a010118
ldc.i8 119
beq a010118
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010118:
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010119
ldsfld int8 [rvastatic3]A::a010119
ldc.i4 120
beq a010119
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010119:
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010120
ldsfld int64 [rvastatic3]A::a010120
ldc.i8 121
beq a010120
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010120:
ldsfld int32 [rvastatic3]A::a010121
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010121
ldsfld int32 [rvastatic3]A::a010121
ldc.i4 122
beq a010121
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010121:
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010122
ldsfld int8 [rvastatic3]A::a010122
ldc.i4 3
beq a010122
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010122:
ldsfld int32 [rvastatic3]A::a010123
ldc.i4 1
add
stsfld int32 [rvastatic3]A::a010123
ldsfld int32 [rvastatic3]A::a010123
ldc.i4 124
beq a010123
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010123:
ldsfld int8 [rvastatic3]A::a010124
ldc.i4 1
add
stsfld int8 [rvastatic3]A::a010124
ldsfld int8 [rvastatic3]A::a010124
ldc.i4 5
beq a010124
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010124:
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010125
ldsfld int64 [rvastatic3]A::a010125
ldc.i8 126
beq a010125
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010125:
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 1
add
stsfld float32 [rvastatic3]A::a010126
ldsfld float32 [rvastatic3]A::a010126
ldc.r4 127.0
beq a010126
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010126:
ldsfld int64 [rvastatic3]A::a010127
ldc.i8 1
add
stsfld int64 [rvastatic3]A::a010127
ldsfld int64 [rvastatic3]A::a010127
ldc.i8 128
beq a010127
newobj instance void [mscorlib]System.Exception::.ctor()
throw
a010127:
ret}
.method static int32 Main(string[] args){.entrypoint .maxstack 5
.custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = (
01 00 00 00
)
call void [rvastatic3]A::V1()
call void [rvastatic3]A::V2()
call void [rvastatic3]A::V3()
call void [rvastatic3]A::V4()
call void [rvastatic3]A::V5()
call void [rvastatic3]A::V6()
ldc.i4 100
ret}
.field public static int64 a0100 at b0100
.field public static int64 a0101 at b0101
.field public static float32 a0102 at b0102
.field private static int32 aALIGN10102 at bALIGN10102
.field public static int64 a0103 at b0103
.field public static int64 a0104 at b0104
.field public static int16 a0105 at b0105
.field private static int16 aALIGN10105 at bALIGN10105
.field private static int32 aALIGN20105 at bALIGN20105
.field public static int16 a0106 at b0106
.field private static int16 aALIGN10106 at bALIGN10106
.field private static int32 aALIGN20106 at bALIGN20106
.field public static float32 a0107 at b0107
.field private static int32 aALIGN10107 at bALIGN10107
.field public static int32 a0108 at b0108
.field private static int32 aALIGN10108 at bALIGN10108
.field public static int8 a0109 at b0109
.field private static int32 aALIGN10109 at bALIGN10109
.field private static int16 aALIGN20109 at bALIGN20109
.field private static int8 aALIGN20109 at bALIGN30109
.field public static int32 a01010 at b01010
.field private static int32 aALIGN101010 at bALIGN101010
.field public static int64 a01011 at b01011
.field public static int32 a01012 at b01012
.field private static int32 aALIGN101012 at bALIGN101012
.field public static int8 a01013 at b01013
.field private static int32 aALIGN101013 at bALIGN101013
.field private static int16 aALIGN201013 at bALIGN201013
.field private static int8 aALIGN201013 at bALIGN301013
.field public static int16 a01014 at b01014
.field private static int16 aALIGN101014 at bALIGN101014
.field private static int32 aALIGN201014 at bALIGN201014
.field public static int16 a01015 at b01015
.field private static int16 aALIGN101015 at bALIGN101015
.field private static int32 aALIGN201015 at bALIGN201015
.field public static float32 a01016 at b01016
.field private static int32 aALIGN101016 at bALIGN101016
.field public static float32 a01017 at b01017
.field private static int32 aALIGN101017 at bALIGN101017
.field public static int32 a01018 at b01018
.field private static int32 aALIGN101018 at bALIGN101018
.field public static int8 a01019 at b01019
.field private static int32 aALIGN101019 at bALIGN101019
.field private static int16 aALIGN201019 at bALIGN201019
.field private static int8 aALIGN201019 at bALIGN301019
.field public static int32 a01020 at b01020
.field private static int32 aALIGN101020 at bALIGN101020
.field public static int32 a01021 at b01021
.field private static int32 aALIGN101021 at bALIGN101021
.field public static int64 a01022 at b01022
.field public static int32 a01023 at b01023
.field private static int32 aALIGN101023 at bALIGN101023
.field public static int8 a01024 at b01024
.field private static int32 aALIGN101024 at bALIGN101024
.field private static int16 aALIGN201024 at bALIGN201024
.field private static int8 aALIGN201024 at bALIGN301024
.field public static int8 a01025 at b01025
.field private static int32 aALIGN101025 at bALIGN101025
.field private static int16 aALIGN201025 at bALIGN201025
.field private static int8 aALIGN201025 at bALIGN301025
.field public static int16 a01026 at b01026
.field private static int16 aALIGN101026 at bALIGN101026
.field private static int32 aALIGN201026 at bALIGN201026
.field public static int8 a01027 at b01027
.field private static int32 aALIGN101027 at bALIGN101027
.field private static int16 aALIGN201027 at bALIGN201027
.field private static int8 aALIGN201027 at bALIGN301027
.field public static int16 a01028 at b01028
.field private static int16 aALIGN101028 at bALIGN101028
.field private static int32 aALIGN201028 at bALIGN201028
.field public static int64 a01029 at b01029
.field public static int32 a01030 at b01030
.field private static int32 aALIGN101030 at bALIGN101030
.field public static int32 a01031 at b01031
.field private static int32 aALIGN101031 at bALIGN101031
.field public static int32 a01032 at b01032
.field private static int32 aALIGN101032 at bALIGN101032
.field public static int8 a01033 at b01033
.field private static int32 aALIGN101033 at bALIGN101033
.field private static int16 aALIGN201033 at bALIGN201033
.field private static int8 aALIGN201033 at bALIGN301033
.field public static int16 a01034 at b01034
.field private static int16 aALIGN101034 at bALIGN101034
.field private static int32 aALIGN201034 at bALIGN201034
.field public static int32 a01035 at b01035
.field private static int32 aALIGN101035 at bALIGN101035
.field public static int32 a01036 at b01036
.field private static int32 aALIGN101036 at bALIGN101036
.field public static int16 a01037 at b01037
.field private static int16 aALIGN101037 at bALIGN101037
.field private static int32 aALIGN201037 at bALIGN201037
.field public static float32 a01038 at b01038
.field private static int32 aALIGN101038 at bALIGN101038
.field public static int8 a01039 at b01039
.field private static int32 aALIGN101039 at bALIGN101039
.field private static int16 aALIGN201039 at bALIGN201039
.field private static int8 aALIGN201039 at bALIGN301039
.field public static int8 a01040 at b01040
.field private static int32 aALIGN101040 at bALIGN101040
.field private static int16 aALIGN201040 at bALIGN201040
.field private static int8 aALIGN201040 at bALIGN301040
.field public static float32 a01041 at b01041
.field private static int32 aALIGN101041 at bALIGN101041
.field public static int32 a01042 at b01042
.field private static int32 aALIGN101042 at bALIGN101042
.field public static int32 a01043 at b01043
.field private static int32 aALIGN101043 at bALIGN101043
.field public static int32 a01044 at b01044
.field private static int32 aALIGN101044 at bALIGN101044
.field public static int64 a01045 at b01045
.field public static int64 a01046 at b01046
.field public static int64 a01047 at b01047
.field public static float32 a01048 at b01048
.field private static int32 aALIGN101048 at bALIGN101048
.field public static int64 a01049 at b01049
.field public static int32 a01050 at b01050
.field private static int32 aALIGN101050 at bALIGN101050
.field public static float32 a01051 at b01051
.field private static int32 aALIGN101051 at bALIGN101051
.field public static int32 a01052 at b01052
.field private static int32 aALIGN101052 at bALIGN101052
.field public static int64 a01053 at b01053
.field public static int8 a01054 at b01054
.field private static int32 aALIGN101054 at bALIGN101054
.field private static int16 aALIGN201054 at bALIGN201054
.field private static int8 aALIGN201054 at bALIGN301054
.field public static int8 a01055 at b01055
.field private static int32 aALIGN101055 at bALIGN101055
.field private static int16 aALIGN201055 at bALIGN201055
.field private static int8 aALIGN201055 at bALIGN301055
.field public static float32 a01056 at b01056
.field private static int32 aALIGN101056 at bALIGN101056
.field public static int32 a01057 at b01057
.field private static int32 aALIGN101057 at bALIGN101057
.field public static int64 a01058 at b01058
.field public static int64 a01059 at b01059
.field public static int8 a01060 at b01060
.field private static int32 aALIGN101060 at bALIGN101060
.field private static int16 aALIGN201060 at bALIGN201060
.field private static int8 aALIGN201060 at bALIGN301060
.field public static int16 a01061 at b01061
.field private static int16 aALIGN101061 at bALIGN101061
.field private static int32 aALIGN201061 at bALIGN201061
.field public static int64 a01062 at b01062
.field public static float32 a01063 at b01063
.field private static int32 aALIGN101063 at bALIGN101063
.field public static int64 a01064 at b01064
.field public static float32 a01065 at b01065
.field private static int32 aALIGN101065 at bALIGN101065
.field public static int8 a01066 at b01066
.field private static int32 aALIGN101066 at bALIGN101066
.field private static int16 aALIGN201066 at bALIGN201066
.field private static int8 aALIGN201066 at bALIGN301066
.field public static int16 a01067 at b01067
.field private static int16 aALIGN101067 at bALIGN101067
.field private static int32 aALIGN201067 at bALIGN201067
.field public static int64 a01068 at b01068
.field public static int64 a01069 at b01069
.field public static int64 a01070 at b01070
.field public static int32 a01071 at b01071
.field private static int32 aALIGN101071 at bALIGN101071
.field public static int8 a01072 at b01072
.field private static int32 aALIGN101072 at bALIGN101072
.field private static int16 aALIGN201072 at bALIGN201072
.field private static int8 aALIGN201072 at bALIGN301072
.field public static int32 a01073 at b01073
.field private static int32 aALIGN101073 at bALIGN101073
.field public static float32 a01074 at b01074
.field private static int32 aALIGN101074 at bALIGN101074
.field public static int64 a01075 at b01075
.field public static int8 a01076 at b01076
.field private static int32 aALIGN101076 at bALIGN101076
.field private static int16 aALIGN201076 at bALIGN201076
.field private static int8 aALIGN201076 at bALIGN301076
.field public static int8 a01077 at b01077
.field private static int32 aALIGN101077 at bALIGN101077
.field private static int16 aALIGN201077 at bALIGN201077
.field private static int8 aALIGN201077 at bALIGN301077
.field public static int8 a01078 at b01078
.field private static int32 aALIGN101078 at bALIGN101078
.field private static int16 aALIGN201078 at bALIGN201078
.field private static int8 aALIGN201078 at bALIGN301078
.field public static int32 a01079 at b01079
.field private static int32 aALIGN101079 at bALIGN101079
.field public static int16 a01080 at b01080
.field private static int16 aALIGN101080 at bALIGN101080
.field private static int32 aALIGN201080 at bALIGN201080
.field public static int32 a01081 at b01081
.field private static int32 aALIGN101081 at bALIGN101081
.field public static int8 a01082 at b01082
.field private static int32 aALIGN101082 at bALIGN101082
.field private static int16 aALIGN201082 at bALIGN201082
.field private static int8 aALIGN201082 at bALIGN301082
.field public static int32 a01083 at b01083
.field private static int32 aALIGN101083 at bALIGN101083
.field public static int8 a01084 at b01084
.field private static int32 aALIGN101084 at bALIGN101084
.field private static int16 aALIGN201084 at bALIGN201084
.field private static int8 aALIGN201084 at bALIGN301084
.field public static int32 a01085 at b01085
.field private static int32 aALIGN101085 at bALIGN101085
.field public static int8 a01086 at b01086
.field private static int32 aALIGN101086 at bALIGN101086
.field private static int16 aALIGN201086 at bALIGN201086
.field private static int8 aALIGN201086 at bALIGN301086
.field public static int64 a01087 at b01087
.field public static int8 a01088 at b01088
.field private static int32 aALIGN101088 at bALIGN101088
.field private static int16 aALIGN201088 at bALIGN201088
.field private static int8 aALIGN201088 at bALIGN301088
.field public static int16 a01089 at b01089
.field private static int16 aALIGN101089 at bALIGN101089
.field private static int32 aALIGN201089 at bALIGN201089
.field public static int64 a01090 at b01090
.field public static int8 a01091 at b01091
.field private static int32 aALIGN101091 at bALIGN101091
.field private static int16 aALIGN201091 at bALIGN201091
.field private static int8 aALIGN201091 at bALIGN301091
.field public static int64 a01092 at b01092
.field public static int16 a01093 at b01093
.field private static int16 aALIGN101093 at bALIGN101093
.field private static int32 aALIGN201093 at bALIGN201093
.field public static int8 a01094 at b01094
.field private static int32 aALIGN101094 at bALIGN101094
.field private static int16 aALIGN201094 at bALIGN201094
.field private static int8 aALIGN201094 at bALIGN301094
.field public static float32 a01095 at b01095
.field private static int32 aALIGN101095 at bALIGN101095
.field public static int16 a01096 at b01096
.field private static int16 aALIGN101096 at bALIGN101096
.field private static int32 aALIGN201096 at bALIGN201096
.field public static int64 a01097 at b01097
.field public static float32 a01098 at b01098
.field private static int32 aALIGN101098 at bALIGN101098
.field public static int32 a01099 at b01099
.field private static int32 aALIGN101099 at bALIGN101099
.field public static int32 a010100 at b010100
.field private static int32 aALIGN1010100 at bALIGN1010100
.field public static int32 a010101 at b010101
.field private static int32 aALIGN1010101 at bALIGN1010101
.field public static int8 a010102 at b010102
.field private static int32 aALIGN1010102 at bALIGN1010102
.field private static int16 aALIGN2010102 at bALIGN2010102
.field private static int8 aALIGN2010102 at bALIGN3010102
.field public static int16 a010103 at b010103
.field private static int16 aALIGN1010103 at bALIGN1010103
.field private static int32 aALIGN2010103 at bALIGN2010103
.field public static int8 a010104 at b010104
.field private static int32 aALIGN1010104 at bALIGN1010104
.field private static int16 aALIGN2010104 at bALIGN2010104
.field private static int8 aALIGN2010104 at bALIGN3010104
.field public static float32 a010105 at b010105
.field private static int32 aALIGN1010105 at bALIGN1010105
.field public static float32 a010106 at b010106
.field private static int32 aALIGN1010106 at bALIGN1010106
.field public static int8 a010107 at b010107
.field private static int32 aALIGN1010107 at bALIGN1010107
.field private static int16 aALIGN2010107 at bALIGN2010107
.field private static int8 aALIGN2010107 at bALIGN3010107
.field public static float32 a010108 at b010108
.field private static int32 aALIGN1010108 at bALIGN1010108
.field public static int16 a010109 at b010109
.field private static int16 aALIGN1010109 at bALIGN1010109
.field private static int32 aALIGN2010109 at bALIGN2010109
.field public static int8 a010110 at b010110
.field private static int32 aALIGN1010110 at bALIGN1010110
.field private static int16 aALIGN2010110 at bALIGN2010110
.field private static int8 aALIGN2010110 at bALIGN3010110
.field public static int64 a010111 at b010111
.field public static int16 a010112 at b010112
.field private static int16 aALIGN1010112 at bALIGN1010112
.field private static int32 aALIGN2010112 at bALIGN2010112
.field public static int16 a010113 at b010113
.field private static int16 aALIGN1010113 at bALIGN1010113
.field private static int32 aALIGN2010113 at bALIGN2010113
.field public static int32 a010114 at b010114
.field private static int32 aALIGN1010114 at bALIGN1010114
.field public static int32 a010115 at b010115
.field private static int32 aALIGN1010115 at bALIGN1010115
.field public static int8 a010116 at b010116
.field private static int32 aALIGN1010116 at bALIGN1010116
.field private static int16 aALIGN2010116 at bALIGN2010116
.field private static int8 aALIGN2010116 at bALIGN3010116
.field public static int16 a010117 at b010117
.field private static int16 aALIGN1010117 at bALIGN1010117
.field private static int32 aALIGN2010117 at bALIGN2010117
.field public static int64 a010118 at b010118
.field public static int8 a010119 at b010119
.field private static int32 aALIGN1010119 at bALIGN1010119
.field private static int16 aALIGN2010119 at bALIGN2010119
.field private static int8 aALIGN2010119 at bALIGN3010119
.field public static int64 a010120 at b010120
.field public static int32 a010121 at b010121
.field private static int32 aALIGN1010121 at bALIGN1010121
.field public static int8 a010122 at b010122
.field private static int32 aALIGN1010122 at bALIGN1010122
.field private static int16 aALIGN2010122 at bALIGN2010122
.field private static int8 aALIGN2010122 at bALIGN3010122
.field public static int32 a010123 at b010123
.field private static int32 aALIGN1010123 at bALIGN1010123
.field public static int8 a010124 at b010124
.field private static int32 aALIGN1010124 at bALIGN1010124
.field private static int16 aALIGN2010124 at bALIGN2010124
.field private static int8 aALIGN2010124 at bALIGN3010124
.field public static int64 a010125 at b010125
.field public static float32 a010126 at b010126
.field private static int32 aALIGN1010126 at bALIGN1010126
.field public static int64 a010127 at b010127
}
.data b0100 = int64(0)
.data b0101 = int64(1)
.data b0102 = float32(2.0)
.data bALIGN10102 = int32(0)
.data b0103 = int64(3)
.data b0104 = int64(4)
.data b0105 = int16(5)
.data bALIGN10105 = int16(0)
.data bALIGN20105 = int32(0)
.data b0106 = int16(6)
.data bALIGN10106 = int16(0)
.data bALIGN20106 = int32(0)
.data b0107 = float32(7.0)
.data bALIGN10107 = int32(0)
.data b0108 = int32(8)
.data bALIGN10108 = int32(0)
.data b0109 = int8(9)
.data bALIGN10109 = int32(0)
.data bALIGN20109 = int16(0)
.data bALIGN30109 = int8(0)
.data b01010 = int32(10)
.data bALIGN101010 = int32(0)
.data b01011 = int64(11)
.data b01012 = int32(12)
.data bALIGN101012 = int32(0)
.data b01013 = int8(13)
.data bALIGN101013 = int32(0)
.data bALIGN201013 = int16(0)
.data bALIGN301013 = int8(0)
.data b01014 = int16(14)
.data bALIGN101014 = int16(0)
.data bALIGN201014 = int32(0)
.data b01015 = int16(15)
.data bALIGN101015 = int16(0)
.data bALIGN201015 = int32(0)
.data b01016 = float32(16.0)
.data bALIGN101016 = int32(0)
.data b01017 = float32(17.0)
.data bALIGN101017 = int32(0)
.data b01018 = int32(18)
.data bALIGN101018 = int32(0)
.data b01019 = int8(19)
.data bALIGN101019 = int32(0)
.data bALIGN201019 = int16(0)
.data bALIGN301019 = int8(0)
.data b01020 = int32(20)
.data bALIGN101020 = int32(0)
.data b01021 = int32(21)
.data bALIGN101021 = int32(0)
.data b01022 = int64(22)
.data b01023 = int32(23)
.data bALIGN101023 = int32(0)
.data b01024 = int8(24)
.data bALIGN101024 = int32(0)
.data bALIGN201024 = int16(0)
.data bALIGN301024 = int8(0)
.data b01025 = int8(25)
.data bALIGN101025 = int32(0)
.data bALIGN201025 = int16(0)
.data bALIGN301025 = int8(0)
.data b01026 = int16(26)
.data bALIGN101026 = int16(0)
.data bALIGN201026 = int32(0)
.data b01027 = int8(27)
.data bALIGN101027 = int32(0)
.data bALIGN201027 = int16(0)
.data bALIGN301027 = int8(0)
.data b01028 = int16(28)
.data bALIGN101028 = int16(0)
.data bALIGN201028 = int32(0)
.data b01029 = int64(29)
.data b01030 = int32(30)
.data bALIGN101030 = int32(0)
.data b01031 = int32(31)
.data bALIGN101031 = int32(0)
.data b01032 = int32(32)
.data bALIGN101032 = int32(0)
.data b01033 = int8(33)
.data bALIGN101033 = int32(0)
.data bALIGN201033 = int16(0)
.data bALIGN301033 = int8(0)
.data b01034 = int16(34)
.data bALIGN101034 = int16(0)
.data bALIGN201034 = int32(0)
.data b01035 = int32(35)
.data bALIGN101035 = int32(0)
.data b01036 = int32(36)
.data bALIGN101036 = int32(0)
.data b01037 = int16(37)
.data bALIGN101037 = int16(0)
.data bALIGN201037 = int32(0)
.data b01038 = float32(38.0)
.data bALIGN101038 = int32(0)
.data b01039 = int8(39)
.data bALIGN101039 = int32(0)
.data bALIGN201039 = int16(0)
.data bALIGN301039 = int8(0)
.data b01040 = int8(40)
.data bALIGN101040 = int32(0)
.data bALIGN201040 = int16(0)
.data bALIGN301040 = int8(0)
.data b01041 = float32(41.0)
.data bALIGN101041 = int32(0)
.data b01042 = int32(42)
.data bALIGN101042 = int32(0)
.data b01043 = int32(43)
.data bALIGN101043 = int32(0)
.data b01044 = int32(44)
.data bALIGN101044 = int32(0)
.data b01045 = int64(45)
.data b01046 = int64(46)
.data b01047 = int64(47)
.data b01048 = float32(48.0)
.data bALIGN101048 = int32(0)
.data b01049 = int64(49)
.data b01050 = int32(50)
.data bALIGN101050 = int32(0)
.data b01051 = float32(51.0)
.data bALIGN101051 = int32(0)
.data b01052 = int32(52)
.data bALIGN101052 = int32(0)
.data b01053 = int64(53)
.data b01054 = int8(54)
.data bALIGN101054 = int32(0)
.data bALIGN201054 = int16(0)
.data bALIGN301054 = int8(0)
.data b01055 = int8(55)
.data bALIGN101055 = int32(0)
.data bALIGN201055 = int16(0)
.data bALIGN301055 = int8(0)
.data b01056 = float32(56.0)
.data bALIGN101056 = int32(0)
.data b01057 = int32(57)
.data bALIGN101057 = int32(0)
.data b01058 = int64(58)
.data b01059 = int64(59)
.data b01060 = int8(60)
.data bALIGN101060 = int32(0)
.data bALIGN201060 = int16(0)
.data bALIGN301060 = int8(0)
.data b01061 = int16(61)
.data bALIGN101061 = int16(0)
.data bALIGN201061 = int32(0)
.data b01062 = int64(62)
.data b01063 = float32(63.0)
.data bALIGN101063 = int32(0)
.data b01064 = int64(64)
.data b01065 = float32(65.0)
.data bALIGN101065 = int32(0)
.data b01066 = int8(66)
.data bALIGN101066 = int32(0)
.data bALIGN201066 = int16(0)
.data bALIGN301066 = int8(0)
.data b01067 = int16(67)
.data bALIGN101067 = int16(0)
.data bALIGN201067 = int32(0)
.data b01068 = int64(68)
.data b01069 = int64(69)
.data b01070 = int64(70)
.data b01071 = int32(71)
.data bALIGN101071 = int32(0)
.data b01072 = int8(72)
.data bALIGN101072 = int32(0)
.data bALIGN201072 = int16(0)
.data bALIGN301072 = int8(0)
.data b01073 = int32(73)
.data bALIGN101073 = int32(0)
.data b01074 = float32(74.0)
.data bALIGN101074 = int32(0)
.data b01075 = int64(75)
.data b01076 = int8(76)
.data bALIGN101076 = int32(0)
.data bALIGN201076 = int16(0)
.data bALIGN301076 = int8(0)
.data b01077 = int8(77)
.data bALIGN101077 = int32(0)
.data bALIGN201077 = int16(0)
.data bALIGN301077 = int8(0)
.data b01078 = int8(78)
.data bALIGN101078 = int32(0)
.data bALIGN201078 = int16(0)
.data bALIGN301078 = int8(0)
.data b01079 = int32(79)
.data bALIGN101079 = int32(0)
.data b01080 = int16(80)
.data bALIGN101080 = int16(0)
.data bALIGN201080 = int32(0)
.data b01081 = int32(81)
.data bALIGN101081 = int32(0)
.data b01082 = int8(82)
.data bALIGN101082 = int32(0)
.data bALIGN201082 = int16(0)
.data bALIGN301082 = int8(0)
.data b01083 = int32(83)
.data bALIGN101083 = int32(0)
.data b01084 = int8(84)
.data bALIGN101084 = int32(0)
.data bALIGN201084 = int16(0)
.data bALIGN301084 = int8(0)
.data b01085 = int32(85)
.data bALIGN101085 = int32(0)
.data b01086 = int8(86)
.data bALIGN101086 = int32(0)
.data bALIGN201086 = int16(0)
.data bALIGN301086 = int8(0)
.data b01087 = int64(87)
.data b01088 = int8(88)
.data bALIGN101088 = int32(0)
.data bALIGN201088 = int16(0)
.data bALIGN301088 = int8(0)
.data b01089 = int16(89)
.data bALIGN101089 = int16(0)
.data bALIGN201089 = int32(0)
.data b01090 = int64(90)
.data b01091 = int8(91)
.data bALIGN101091 = int32(0)
.data bALIGN201091 = int16(0)
.data bALIGN301091 = int8(0)
.data b01092 = int64(92)
.data b01093 = int16(93)
.data bALIGN101093 = int16(0)
.data bALIGN201093 = int32(0)
.data b01094 = int8(94)
.data bALIGN101094 = int32(0)
.data bALIGN201094 = int16(0)
.data bALIGN301094 = int8(0)
.data b01095 = float32(95.0)
.data bALIGN101095 = int32(0)
.data b01096 = int16(96)
.data bALIGN101096 = int16(0)
.data bALIGN201096 = int32(0)
.data b01097 = int64(97)
.data b01098 = float32(98.0)
.data bALIGN101098 = int32(0)
.data b01099 = int32(99)
.data bALIGN101099 = int32(0)
.data b010100 = int32(100)
.data bALIGN1010100 = int32(0)
.data b010101 = int32(101)
.data bALIGN1010101 = int32(0)
.data b010102 = int8(102)
.data bALIGN1010102 = int32(0)
.data bALIGN2010102 = int16(0)
.data bALIGN3010102 = int8(0)
.data b010103 = int16(103)
.data bALIGN1010103 = int16(0)
.data bALIGN2010103 = int32(0)
.data b010104 = int8(104)
.data bALIGN1010104 = int32(0)
.data bALIGN2010104 = int16(0)
.data bALIGN3010104 = int8(0)
.data b010105 = float32(105.0)
.data bALIGN1010105 = int32(0)
.data b010106 = float32(106.0)
.data bALIGN1010106 = int32(0)
.data b010107 = int8(107)
.data bALIGN1010107 = int32(0)
.data bALIGN2010107 = int16(0)
.data bALIGN3010107 = int8(0)
.data b010108 = float32(108.0)
.data bALIGN1010108 = int32(0)
.data b010109 = int16(109)
.data bALIGN1010109 = int16(0)
.data bALIGN2010109 = int32(0)
.data b010110 = int8(110)
.data bALIGN1010110 = int32(0)
.data bALIGN2010110 = int16(0)
.data bALIGN3010110 = int8(0)
.data b010111 = int64(111)
.data b010112 = int16(112)
.data bALIGN1010112 = int16(0)
.data bALIGN2010112 = int32(0)
.data b010113 = int16(113)
.data bALIGN1010113 = int16(0)
.data bALIGN2010113 = int32(0)
.data b010114 = int32(114)
.data bALIGN1010114 = int32(0)
.data b010115 = int32(115)
.data bALIGN1010115 = int32(0)
.data b010116 = int8(116)
.data bALIGN1010116 = int32(0)
.data bALIGN2010116 = int16(0)
.data bALIGN3010116 = int8(0)
.data b010117 = int16(117)
.data bALIGN1010117 = int16(0)
.data bALIGN2010117 = int32(0)
.data b010118 = int64(118)
.data b010119 = int8(119)
.data bALIGN1010119 = int32(0)
.data bALIGN2010119 = int16(0)
.data bALIGN3010119 = int8(0)
.data b010120 = int64(120)
.data b010121 = int32(121)
.data bALIGN1010121 = int32(0)
.data b010122 = int8(2)
.data bALIGN1010122 = int32(0)
.data bALIGN2010122 = int16(0)
.data bALIGN3010122 = int8(0)
.data b010123 = int32(123)
.data bALIGN1010123 = int32(0)
.data b010124 = int8(4)
.data bALIGN1010124 = int32(0)
.data bALIGN2010124 = int16(0)
.data bALIGN3010124 = int8(0)
.data b010125 = int64(125)
.data b010126 = float32(126.0)
.data bALIGN1010126 = int32(0)
.data b010127 = int64(127)
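Every block in the IL test above follows the same shape: the .data directive seeds an RVA-mapped static at one less than its target value, the method increments it once, and a beq past the throw verifies the result; Main then returns 100. As a rough, hypothetical C# rendering of that shape (field names borrowed from two of the entries above purely for illustration, since the RVA .data mapping itself has no direct C# equivalent):

// Rough C# analogue of the increment-and-check pattern used throughout the IL test.
// The seeds mirror '.data b0100 = int64(0)' and '.data b0102 = float32(2.0)' above.
using System;

static class RvaStaticPatternSketch
{
    static long a0100 = 0;       // seeded at expected - 1
    static float a0102 = 2.0f;   // seeded at expected - 1

    static void Main()
    {
        a0100 += 1;
        if (a0100 != 1) throw new Exception("a0100");    // mirrors 'beq ... / throw'

        a0102 += 1;
        if (a0102 != 3.0f) throw new Exception("a0102");

        Console.WriteLine(100);  // the IL Main ends with 'ldc.i4 100 / ret'
    }
}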
| -1 |
| dotnet/runtime | 66,410 | [mono][wasm] Add changes required by emscripten 3.1.4. | vargaz | 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z | 613f8e7c33cc07f87be54f1f595e2724694f25ff | fddee67155a2f85b0f0949fe6d520dc089e224c6 | [mono][wasm] Add changes required by emscripten 3.1.4.. | ./src/mono/mono/tests/subthread-exit.cs |
using System;
using System.Threading;
public class foo {
public static int Main() {
Thread thr=new Thread(new ThreadStart(foo.thread));
thr.Start();
Thread.Sleep(1200);
Console.WriteLine("Main thread returns");
// the subthread calls Exit(0) before we reach here
return 1;
}
public static void thread() {
Console.WriteLine("Thread running");
Thread.Sleep(500);
Console.WriteLine("Thread exiting");
Environment.Exit(0);
}
}
|
using System;
using System.Threading;
public class foo {
public static int Main() {
Thread thr=new Thread(new ThreadStart(foo.thread));
thr.Start();
Thread.Sleep(1200);
Console.WriteLine("Main thread returns");
// the subthread calls Exit(0) before we reach here
return 1;
}
public static void thread() {
Console.WriteLine("Thread running");
Thread.Sleep(500);
Console.WriteLine("Thread exiting");
Environment.Exit(0);
}
}
| -1 |
| dotnet/runtime | 66,410 | [mono][wasm] Add changes required by emscripten 3.1.4. | vargaz | 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z | 613f8e7c33cc07f87be54f1f595e2724694f25ff | fddee67155a2f85b0f0949fe6d520dc089e224c6 | [mono][wasm] Add changes required by emscripten 3.1.4.. | ./src/libraries/System.Text.RegularExpressions/src/System/Text/RegularExpressions/Symbolic/BDDAlgebra.cs |
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
namespace System.Text.RegularExpressions.Symbolic
{
/// <summary>
/// Boolean algebra for Binary Decision Diagrams. Boolean operations on BDDs are cached for efficiency. The
/// IBooleanAlgebra interface implemented by this class is thread safe.
/// TBD: policy for clearing/reducing the caches when they grow too large.
/// Ultimately, the caches are crucial for efficiency, not for correctness.
/// </summary>
internal abstract class BDDAlgebra : IBooleanAlgebra<BDD>
{
/// <summary>Boolean operations over BDDs.</summary>
private enum BoolOp
{
Or,
And,
Xor,
Not
}
/// <summary>
/// Operation cache for Boolean operations over BDDs.
/// </summary>
private readonly ConcurrentDictionary<(BoolOp op, BDD a, BDD? b), BDD> _opCache = new();
/// <summary>
/// Internalize the creation of BDDs so that two BDDs with same ordinal and identical children are the same object.
/// The algorithms do not rely on 100% internalization
/// (they could but this would make it difficult (or near impossible) to clear caches.
/// Allowing distinct but equivalent BDDs is also a tradeoff between efficiency and flexibility.
/// </summary>
private readonly ConcurrentDictionary<(int ordinal, BDD? one, BDD? zero), BDD> _bddCache = new();
/// <summary>
/// Generator for minterms.
/// </summary>
private readonly MintermGenerator<BDD> _mintermGen;
/// <summary>
/// Construct a solver for BDDs.
/// </summary>
public BDDAlgebra() => _mintermGen = new MintermGenerator<BDD>(this);
/// <summary>
/// Create a BDD with given ordinal and given one and zero child.
/// Returns the BDD from the cache if it already exists.
/// </summary>
public BDD GetOrCreateBDD(int ordinal, BDD? one, BDD? zero) =>
_bddCache.GetOrAdd((ordinal, one, zero), static key => new BDD(key.ordinal, key.one, key.zero));
#region IBooleanAlgebra members
/// <summary>
/// Make the union of a and b
/// </summary>
public BDD Or(BDD a, BDD b) => ApplyBinaryOp(BoolOp.Or, a, b);
/// <summary>
/// Make the intersection of a and b
/// </summary>
public BDD And(BDD a, BDD b) => ApplyBinaryOp(BoolOp.And, a, b);
/// <summary>
/// Complement a
/// </summary>
public BDD Not(BDD a) =>
a == False ? True :
a == True ? False :
_opCache.GetOrAdd((BoolOp.Not, a, null), static (key, algebra) =>
{
Debug.Assert(!key.a.IsLeaf, "Did not expect multi-terminal");
return algebra.GetOrCreateBDD(key.a.Ordinal, algebra.Not(key.a.One), algebra.Not(key.a.Zero));
}, this);
/// <summary>
/// Applies the binary Boolean operation op and constructs the BDD recursively from a and b.
/// </summary>
/// <param name="op">given binary Boolean operation</param>
/// <param name="a">first BDD</param>
/// <param name="b">second BDD</param>
/// <returns></returns>
private BDD ApplyBinaryOp(BoolOp op, BDD a, BDD b)
{
// Handle base cases
#region the cases when one of a or b is True or False or when a == b
switch (op)
{
case BoolOp.Or:
if (a == False)
return b;
if (b == False)
return a;
if (a == True || b == True)
return True;
if (a == b)
return a;
break;
case BoolOp.And:
if (a == True)
return b;
if (b == True)
return a;
if (a == False || b == False)
return False;
if (a == b)
return a;
break;
case BoolOp.Xor:
if (a == False)
return b;
if (b == False)
return a;
if (a == b)
return False;
if (a == True)
return Not(b);
if (b == True)
return Not(a);
break;
default:
Debug.Fail("Unhandled binary BoolOp case");
break;
}
#endregion
// Order operands by hash code to increase cache hits
if (a.GetHashCode() > b.GetHashCode())
{
BDD tmp = a;
a = b;
b = tmp;
}
return _opCache.GetOrAdd((op, a, b), static (key, algebra) =>
{
Debug.Assert(key.b is not null, "Validated it was non-null prior to calling GetOrAdd");
Debug.Assert(!key.a.IsLeaf || !key.b.IsLeaf, "Did not expect multi-terminal case");
if (key.a.IsLeaf || key.b!.Ordinal > key.a.Ordinal)
{
Debug.Assert(!key.b.IsLeaf);
BDD t = algebra.ApplyBinaryOp(key.op, key.a, key.b.One);
BDD f = algebra.ApplyBinaryOp(key.op, key.a, key.b.Zero);
return t == f ? t : algebra.GetOrCreateBDD(key.b.Ordinal, t, f);
}
if (key.b.IsLeaf || key.a.Ordinal > key.b.Ordinal)
{
Debug.Assert(!key.a.IsLeaf);
BDD t = algebra.ApplyBinaryOp(key.op, key.a.One, key.b);
BDD f = algebra.ApplyBinaryOp(key.op, key.a.Zero, key.b);
return t == f ? t : algebra.GetOrCreateBDD(key.a.Ordinal, t, f);
}
{
Debug.Assert(!key.a.IsLeaf);
Debug.Assert(!key.b.IsLeaf);
BDD t = algebra.ApplyBinaryOp(key.op, key.a.One, key.b.One);
BDD f = algebra.ApplyBinaryOp(key.op, key.a.Zero, key.b.Zero);
return t == f ? t : algebra.GetOrCreateBDD(key.a.Ordinal, t, f);
}
}, this);
}
/// <summary>
/// Intersect all sets in the enumeration
/// </summary>
public BDD And(ReadOnlySpan<BDD> sets)
{
BDD res = True;
foreach (BDD bdd in sets)
{
res = And(res, bdd);
}
return res;
}
/// <summary>
/// Take the union of all sets in the enumeration
/// </summary>
public BDD Or(ReadOnlySpan<BDD> sets)
{
BDD res = False;
foreach (BDD bdd in sets)
{
res = Or(res, bdd);
}
return res;
}
/// <summary>
/// Gets the full set.
/// </summary>
public BDD True => BDD.True;
/// <summary>
/// Gets the empty set.
/// </summary>
public BDD False => BDD.False;
/// <summary>
/// Returns true if the set is nonempty.
/// </summary>
public bool IsSatisfiable(BDD set) => set != False;
/// <summary>
/// Returns true if a and b represent equivalent BDDs.
/// </summary>
public bool AreEquivalent(BDD a, BDD b) => Xor(a, b) == False;
#endregion
/// <summary>
/// Make the XOR of a and b
/// </summary>
internal BDD Xor(BDD a, BDD b) => ApplyBinaryOp(BoolOp.Xor, a, b);
#region bit-shift operations
/// <summary>
/// Shift all elements k bits to the right.
/// For example if set denotes {*0000,*1110,*1111} then
/// ShiftRight(set) denotes {*000,*111} where * denotes any prefix of 0's or 1's.
/// </summary>
public BDD ShiftRight(BDD set, int k)
{
Debug.Assert(k >= 0);
return set.IsLeaf ? set : ShiftLeftImpl(new Dictionary<(BDD set, int k), BDD>(), set, 0 - k);
}
/// <summary>
/// Shift all elements k bits to the left.
/// For example if k=1 and set denotes {*0000,*1111} then
/// ShiftLeft(set) denotes {*00000,*00001,*11110,*11111} where * denotes any prefix of 0's or 1's.
/// </summary>
public BDD ShiftLeft(BDD set, int k)
{
Debug.Assert(k >= 0);
return set.IsLeaf ? set : ShiftLeftImpl(new Dictionary<(BDD set, int k), BDD>(), set, k);
}
/// <summary>
/// Uses shiftCache to avoid recomputations in shared BDDs (which are DAGs).
/// </summary>
private BDD ShiftLeftImpl(Dictionary<(BDD set, int k), BDD> shiftCache, BDD set, int k)
{
if (set.IsLeaf || k == 0)
return set;
int ordinal = set.Ordinal + k;
if (ordinal < 0)
return True; //this arises if k is negative
if (!shiftCache.TryGetValue((set, k), out BDD? res))
{
BDD zero = ShiftLeftImpl(shiftCache, set.Zero, k);
BDD one = ShiftLeftImpl(shiftCache, set.One, k);
res = (zero == one) ?
zero :
GetOrCreateBDD((ushort)ordinal, one, zero);
shiftCache[(set, k)] = res;
}
return res;
}
#endregion
/// <summary>
/// Generate all non-overlapping Boolean combinations of a set of BDDs.
/// </summary>
/// <param name="sets">the BDDs to create the minterms for</param>
/// <returns>BDDs for the minterm</returns>
public List<BDD> GenerateMinterms(HashSet<BDD> sets) => _mintermGen.GenerateMinterms(sets);
/// <summary>
/// Make the set containing all values greater than or equal to m and less than or equal to n when considering bits between 0 and maxBit.
/// </summary>
/// <param name="lower">lower bound</param>
/// <param name="upper">upper bound</param>
/// <param name="maxBit">bits above maxBit are unspecified</param>
public BDD CreateSetFromRange(uint lower, uint upper, int maxBit)
{
Debug.Assert(0 <= maxBit && maxBit <= 31, "maxBit must be between 0 and 31");
if (upper < lower)
return False;
// Filter out bits greater than maxBit
if (maxBit < 31)
{
uint filter = (1u << (maxBit + 1)) - 1;
lower &= filter;
upper &= filter;
}
return CreateSetFromRangeImpl(lower, upper, maxBit);
}
private BDD CreateSetFromRangeImpl(uint lower, uint upper, int maxBit)
{
// Mask with 1 at position of maxBit
uint mask = 1u << maxBit;
if (mask == 1) // Base case for least significant bit
{
return
upper == 0 ? GetOrCreateBDD(maxBit, False, True) : // lower must also be 0
lower == 1 ? GetOrCreateBDD(maxBit, True, False) : // upper must also be 1
True; // Otherwise both 0 and 1 are included
}
// Check if range includes all numbers up to bit
if (lower == 0 && upper == ((mask << 1) - 1))
{
return True;
}
// Mask out the highest bit for the first and last elements in the range
uint lowerMasked = lower & mask;
uint upperMasked = upper & mask;
if (upperMasked == 0)
{
// Highest value in range doesn't have maxBit set, so the one branch is empty
BDD zero = CreateSetFromRangeImpl(lower, upper, maxBit - 1);
return GetOrCreateBDD(maxBit, False, zero);
}
else if (lowerMasked == mask)
{
// Lowest value in range has maxBit set, so the zero branch is empty
BDD one = CreateSetFromRangeImpl(lower & ~mask, upper & ~mask, maxBit - 1);
return GetOrCreateBDD(maxBit, one, False);
}
else // Otherwise the range straddles (1<<maxBit) and thus both cases need to be considered
{
// If zero then less significant bits are from lower bound to maximum value with maxBit-1 bits
BDD zero = CreateSetFromRangeImpl(lower, mask - 1, maxBit - 1);
// If one then less significant bits are from zero to the upper bound with maxBit stripped away
BDD one = CreateSetFromRangeImpl(0, upper & ~mask, maxBit - 1);
return GetOrCreateBDD(maxBit, one, zero);
}
}
/// <summary>
/// Replace the True node in the BDD by a non-Boolean terminal.
/// Locks the algebra for single threaded use.
/// Observe that the Ordinal of False is -1 and the Ordinal of True is -2.
/// </summary>
public BDD ReplaceTrue(BDD bdd, int terminal)
{
Debug.Assert(terminal >= 0);
BDD leaf = GetOrCreateBDD(terminal, null, null);
return ReplaceTrueImpl(bdd, leaf, new Dictionary<BDD, BDD>());
}
private BDD ReplaceTrueImpl(BDD bdd, BDD leaf, Dictionary<BDD, BDD> cache)
{
if (bdd == True)
return leaf;
if (bdd.IsLeaf)
return bdd;
if (!cache.TryGetValue(bdd, out BDD? res))
{
BDD one = ReplaceTrueImpl(bdd.One, leaf, cache);
BDD zero = ReplaceTrueImpl(bdd.Zero, leaf, cache);
res = GetOrCreateBDD(bdd.Ordinal, one, zero);
cache[bdd] = res;
}
return res;
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
namespace System.Text.RegularExpressions.Symbolic
{
/// <summary>
/// Boolean algebra for Binary Decision Diagrams. Boolean operations on BDDs are cached for efficiency. The
/// IBooleanAlgebra interface implemented by this class is thread safe.
/// TBD: policy for clearing/reducing the caches when they grow too large.
/// Ultimately, the caches are crucial for efficiency, not for correctness.
/// </summary>
internal abstract class BDDAlgebra : IBooleanAlgebra<BDD>
{
/// <summary>Boolean operations over BDDs.</summary>
private enum BoolOp
{
Or,
And,
Xor,
Not
}
/// <summary>
/// Operation cache for Boolean operations over BDDs.
/// </summary>
private readonly ConcurrentDictionary<(BoolOp op, BDD a, BDD? b), BDD> _opCache = new();
/// <summary>
/// Internalize the creation of BDDs so that two BDDs with same ordinal and identical children are the same object.
/// The algorithms do not rely on 100% internalization
/// (they could but this would make it difficult (or near impossible) to clear caches.
/// Allowing distinct but equivalent BDDs is also a tradeoff between efficiency and flexibility.
/// </summary>
private readonly ConcurrentDictionary<(int ordinal, BDD? one, BDD? zero), BDD> _bddCache = new();
/// <summary>
/// Generator for minterms.
/// </summary>
private readonly MintermGenerator<BDD> _mintermGen;
/// <summary>
/// Construct a solver for BDDs.
/// </summary>
public BDDAlgebra() => _mintermGen = new MintermGenerator<BDD>(this);
/// <summary>
/// Create a BDD with given ordinal and given one and zero child.
/// Returns the BDD from the cache if it already exists.
/// </summary>
public BDD GetOrCreateBDD(int ordinal, BDD? one, BDD? zero) =>
_bddCache.GetOrAdd((ordinal, one, zero), static key => new BDD(key.ordinal, key.one, key.zero));
#region IBooleanAlgebra members
/// <summary>
/// Make the union of a and b
/// </summary>
public BDD Or(BDD a, BDD b) => ApplyBinaryOp(BoolOp.Or, a, b);
/// <summary>
/// Make the intersection of a and b
/// </summary>
public BDD And(BDD a, BDD b) => ApplyBinaryOp(BoolOp.And, a, b);
/// <summary>
/// Complement a
/// </summary>
public BDD Not(BDD a) =>
a == False ? True :
a == True ? False :
_opCache.GetOrAdd((BoolOp.Not, a, null), static (key, algebra) =>
{
Debug.Assert(!key.a.IsLeaf, "Did not expect multi-terminal");
return algebra.GetOrCreateBDD(key.a.Ordinal, algebra.Not(key.a.One), algebra.Not(key.a.Zero));
}, this);
/// <summary>
/// Applies the binary Boolean operation op and constructs the BDD recursively from a and b.
/// </summary>
/// <param name="op">given binary Boolean operation</param>
/// <param name="a">first BDD</param>
/// <param name="b">second BDD</param>
/// <returns></returns>
private BDD ApplyBinaryOp(BoolOp op, BDD a, BDD b)
{
// Handle base cases
#region the cases when one of a or b is True or False or when a == b
switch (op)
{
case BoolOp.Or:
if (a == False)
return b;
if (b == False)
return a;
if (a == True || b == True)
return True;
if (a == b)
return a;
break;
case BoolOp.And:
if (a == True)
return b;
if (b == True)
return a;
if (a == False || b == False)
return False;
if (a == b)
return a;
break;
case BoolOp.Xor:
if (a == False)
return b;
if (b == False)
return a;
if (a == b)
return False;
if (a == True)
return Not(b);
if (b == True)
return Not(a);
break;
default:
Debug.Fail("Unhandled binary BoolOp case");
break;
}
#endregion
// Order operands by hash code to increase cache hits
if (a.GetHashCode() > b.GetHashCode())
{
BDD tmp = a;
a = b;
b = tmp;
}
return _opCache.GetOrAdd((op, a, b), static (key, algebra) =>
{
Debug.Assert(key.b is not null, "Validated it was non-null prior to calling GetOrAdd");
Debug.Assert(!key.a.IsLeaf || !key.b.IsLeaf, "Did not expect multi-terminal case");
if (key.a.IsLeaf || key.b!.Ordinal > key.a.Ordinal)
{
Debug.Assert(!key.b.IsLeaf);
BDD t = algebra.ApplyBinaryOp(key.op, key.a, key.b.One);
BDD f = algebra.ApplyBinaryOp(key.op, key.a, key.b.Zero);
return t == f ? t : algebra.GetOrCreateBDD(key.b.Ordinal, t, f);
}
if (key.b.IsLeaf || key.a.Ordinal > key.b.Ordinal)
{
Debug.Assert(!key.a.IsLeaf);
BDD t = algebra.ApplyBinaryOp(key.op, key.a.One, key.b);
BDD f = algebra.ApplyBinaryOp(key.op, key.a.Zero, key.b);
return t == f ? t : algebra.GetOrCreateBDD(key.a.Ordinal, t, f);
}
{
Debug.Assert(!key.a.IsLeaf);
Debug.Assert(!key.b.IsLeaf);
BDD t = algebra.ApplyBinaryOp(key.op, key.a.One, key.b.One);
BDD f = algebra.ApplyBinaryOp(key.op, key.a.Zero, key.b.Zero);
return t == f ? t : algebra.GetOrCreateBDD(key.a.Ordinal, t, f);
}
}, this);
}
/// <summary>
/// Intersect all sets in the enumeration
/// </summary>
public BDD And(ReadOnlySpan<BDD> sets)
{
BDD res = True;
foreach (BDD bdd in sets)
{
res = And(res, bdd);
}
return res;
}
/// <summary>
/// Take the union of all sets in the enumeration
/// </summary>
public BDD Or(ReadOnlySpan<BDD> sets)
{
BDD res = False;
foreach (BDD bdd in sets)
{
res = Or(res, bdd);
}
return res;
}
/// <summary>
/// Gets the full set.
/// </summary>
public BDD True => BDD.True;
/// <summary>
/// Gets the empty set.
/// </summary>
public BDD False => BDD.False;
/// <summary>
/// Returns true if the set is nonempty.
/// </summary>
public bool IsSatisfiable(BDD set) => set != False;
/// <summary>
/// Returns true if a and b represent equivalent BDDs.
/// </summary>
public bool AreEquivalent(BDD a, BDD b) => Xor(a, b) == False;
#endregion
/// <summary>
/// Make the XOR of a and b
/// </summary>
internal BDD Xor(BDD a, BDD b) => ApplyBinaryOp(BoolOp.Xor, a, b);
#region bit-shift operations
/// <summary>
/// Shift all elements k bits to the right.
/// For example if set denotes {*0000,*1110,*1111} then
/// ShiftRight(set) denotes {*000,*111} where * denotes any prefix of 0's or 1's.
/// </summary>
public BDD ShiftRight(BDD set, int k)
{
Debug.Assert(k >= 0);
return set.IsLeaf ? set : ShiftLeftImpl(new Dictionary<(BDD set, int k), BDD>(), set, 0 - k);
}
/// <summary>
/// Shift all elements k bits to the left.
/// For example if k=1 and set denotes {*0000,*1111} then
/// ShiftLeft(set) denotes {*00000,*00001,*11110,*11111} where * denotes any prefix of 0's or 1's.
/// </summary>
public BDD ShiftLeft(BDD set, int k)
{
Debug.Assert(k >= 0);
return set.IsLeaf ? set : ShiftLeftImpl(new Dictionary<(BDD set, int k), BDD>(), set, k);
}
/// <summary>
/// Uses shiftCache to avoid recomputations in shared BDDs (which are DAGs).
/// </summary>
private BDD ShiftLeftImpl(Dictionary<(BDD set, int k), BDD> shiftCache, BDD set, int k)
{
if (set.IsLeaf || k == 0)
return set;
int ordinal = set.Ordinal + k;
if (ordinal < 0)
return True; //this arises if k is negative
if (!shiftCache.TryGetValue((set, k), out BDD? res))
{
BDD zero = ShiftLeftImpl(shiftCache, set.Zero, k);
BDD one = ShiftLeftImpl(shiftCache, set.One, k);
res = (zero == one) ?
zero :
GetOrCreateBDD((ushort)ordinal, one, zero);
shiftCache[(set, k)] = res;
}
return res;
}
#endregion
/// <summary>
/// Generate all non-overlapping Boolean combinations of a set of BDDs.
/// </summary>
/// <param name="sets">the BDDs to create the minterms for</param>
/// <returns>BDDs for the minterm</returns>
public List<BDD> GenerateMinterms(HashSet<BDD> sets) => _mintermGen.GenerateMinterms(sets);
/// <summary>
/// Make the set containing all values greater than or equal to m and less than or equal to n when considering bits between 0 and maxBit.
/// </summary>
/// <param name="lower">lower bound</param>
/// <param name="upper">upper bound</param>
/// <param name="maxBit">bits above maxBit are unspecified</param>
public BDD CreateSetFromRange(uint lower, uint upper, int maxBit)
{
Debug.Assert(0 <= maxBit && maxBit <= 31, "maxBit must be between 0 and 31");
if (upper < lower)
return False;
// Filter out bits greater than maxBit
if (maxBit < 31)
{
uint filter = (1u << (maxBit + 1)) - 1;
lower &= filter;
upper &= filter;
}
return CreateSetFromRangeImpl(lower, upper, maxBit);
}
private BDD CreateSetFromRangeImpl(uint lower, uint upper, int maxBit)
{
// Mask with 1 at position of maxBit
uint mask = 1u << maxBit;
if (mask == 1) // Base case for least significant bit
{
return
upper == 0 ? GetOrCreateBDD(maxBit, False, True) : // lower must also be 0
lower == 1 ? GetOrCreateBDD(maxBit, True, False) : // upper must also be 1
True; // Otherwise both 0 and 1 are included
}
// Check if range includes all numbers up to bit
if (lower == 0 && upper == ((mask << 1) - 1))
{
return True;
}
// Mask out the highest bit for the first and last elements in the range
uint lowerMasked = lower & mask;
uint upperMasked = upper & mask;
if (upperMasked == 0)
{
// Highest value in range doesn't have maxBit set, so the one branch is empty
BDD zero = CreateSetFromRangeImpl(lower, upper, maxBit - 1);
return GetOrCreateBDD(maxBit, False, zero);
}
else if (lowerMasked == mask)
{
// Lowest value in range has maxBit set, so the zero branch is empty
BDD one = CreateSetFromRangeImpl(lower & ~mask, upper & ~mask, maxBit - 1);
return GetOrCreateBDD(maxBit, one, False);
}
else // Otherwise the range straddles (1<<maxBit) and thus both cases need to be considered
{
// If zero then less significant bits are from lower bound to maximum value with maxBit-1 bits
BDD zero = CreateSetFromRangeImpl(lower, mask - 1, maxBit - 1);
// If one then less significant bits are from zero to the upper bound with maxBit stripped away
BDD one = CreateSetFromRangeImpl(0, upper & ~mask, maxBit - 1);
return GetOrCreateBDD(maxBit, one, zero);
}
}
/// <summary>
/// Replace the True node in the BDD by a non-Boolean terminal.
/// Locks the algebra for single threaded use.
/// Observe that the Ordinal of False is -1 and the Ordinal of True is -2.
/// </summary>
public BDD ReplaceTrue(BDD bdd, int terminal)
{
Debug.Assert(terminal >= 0);
BDD leaf = GetOrCreateBDD(terminal, null, null);
return ReplaceTrueImpl(bdd, leaf, new Dictionary<BDD, BDD>());
}
private BDD ReplaceTrueImpl(BDD bdd, BDD leaf, Dictionary<BDD, BDD> cache)
{
if (bdd == True)
return leaf;
if (bdd.IsLeaf)
return bdd;
if (!cache.TryGetValue(bdd, out BDD? res))
{
BDD one = ReplaceTrueImpl(bdd.One, leaf, cache);
BDD zero = ReplaceTrueImpl(bdd.Zero, leaf, cache);
res = GetOrCreateBDD(bdd.Ordinal, one, zero);
cache[bdd] = res;
}
return res;
}
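        // Illustrative sketch (added commentary, not part of the original source): ReplaceTrue turns a
        // Boolean BDD into a multi-terminal one by rerouting every accepting path to a leaf that carries
        // a non-negative terminal id. The helper name and the terminal id 7 below are hypothetical.
        private BDD TagRangeWithTerminalExample() => ReplaceTrue(CreateSetFromRange(0, 9, 3), 7);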
}
}
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/tests/JIT/HardwareIntrinsics/General/NotSupported/Vector256BooleanAsGeneric_UInt32.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void Vector256BooleanAsGeneric_UInt32()
{
bool succeeded = false;
try
{
Vector256<uint> result = default(Vector256<bool>).As<bool, uint>();
}
catch (NotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"Vector256BooleanAsGeneric_UInt32: RunNotSupportedScenario failed to throw NotSupportedException.");
TestLibrary.TestFramework.LogInformation(string.Empty);
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************************
* This file is auto-generated from a template file by the GenerateTests.csx *
* script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make *
* changes, please update the corresponding template and run according to the *
* directions listed in the file. *
******************************************************************************/
using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
namespace JIT.HardwareIntrinsics.General
{
public static partial class Program
{
private static void Vector256BooleanAsGeneric_UInt32()
{
bool succeeded = false;
try
{
Vector256<uint> result = default(Vector256<bool>).As<bool, uint>();
}
catch (NotSupportedException)
{
succeeded = true;
}
if (!succeeded)
{
TestLibrary.TestFramework.LogInformation($"Vector256BooleanAsGeneric_UInt32: RunNotSupportedScenario failed to throw NotSupportedException.");
TestLibrary.TestFramework.LogInformation(string.Empty);
throw new Exception("One or more scenarios did not complete as expected.");
}
}
}
}
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Globalization.Extensions/tests/System.Globalization.Extensions.Tests.csproj
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<TestRuntime>true</TestRuntime>
</PropertyGroup>
<ItemGroup>
<Compile Include="GetStringComparerTests.cs" />
<Compile Include="IdnMapping\Data\ConformanceIdnaUnicodeTestResult.cs" />
<Compile Include="IdnMapping\Data\Unicode_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_9_0\Unicode_9_0_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_11_0\Unicode_11_0_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_13_0\Unicode_13_0_IdnaTest.cs" />
<Compile Include="IdnMapping\IdnMappingIdnaConformanceTests.cs" />
<Compile Include="IdnMapping\Data\Factory.cs" />
<Compile Include="IdnMapping\Data\ConformanceIdnaTestResult.cs" />
<Compile Include="IdnMapping\Data\Unicode_6_0\Unicode_6_0_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_Win7\Unicode_Win7_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\IConformanceIdnaTest.cs" />
<Compile Include="IdnMapping\IdnMappingGetAsciiTests.cs" />
<Compile Include="IdnMapping\IdnMappingGetUnicodeTests.cs" />
<Compile Include="IdnMapping\IdnMappingUseStd3AsciiRulesTests.cs" />
<Compile Include="Normalization\StringNormalizationTests.cs" />
<Compile Include="Normalization\NormalizationAll.cs" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="IdnMapping\Data\Unicode_6_0\IdnaTest_6.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_Win7\IdnaTest_Win7.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_9_0\IdnaTest_9.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_11_0\IdnaTest_11.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_13_0\IdnaTest_13.txt" />
<EmbeddedResource Include="Normalization\Data\win8.txt">
<LogicalName>NormalizationDataWin8</LogicalName>
</EmbeddedResource>
<EmbeddedResource Include="Normalization\Data\win7.txt">
<LogicalName>NormalizationDataWin7</LogicalName>
</EmbeddedResource>
</ItemGroup>
</Project>
|
<Project Sdk="Microsoft.NET.Sdk">
<PropertyGroup>
<TargetFramework>$(NetCoreAppCurrent)</TargetFramework>
<TestRuntime>true</TestRuntime>
</PropertyGroup>
<ItemGroup>
<Compile Include="GetStringComparerTests.cs" />
<Compile Include="IdnMapping\Data\ConformanceIdnaUnicodeTestResult.cs" />
<Compile Include="IdnMapping\Data\Unicode_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_9_0\Unicode_9_0_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_11_0\Unicode_11_0_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_13_0\Unicode_13_0_IdnaTest.cs" />
<Compile Include="IdnMapping\IdnMappingIdnaConformanceTests.cs" />
<Compile Include="IdnMapping\Data\Factory.cs" />
<Compile Include="IdnMapping\Data\ConformanceIdnaTestResult.cs" />
<Compile Include="IdnMapping\Data\Unicode_6_0\Unicode_6_0_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\Unicode_Win7\Unicode_Win7_IdnaTest.cs" />
<Compile Include="IdnMapping\Data\IConformanceIdnaTest.cs" />
<Compile Include="IdnMapping\IdnMappingGetAsciiTests.cs" />
<Compile Include="IdnMapping\IdnMappingGetUnicodeTests.cs" />
<Compile Include="IdnMapping\IdnMappingUseStd3AsciiRulesTests.cs" />
<Compile Include="Normalization\StringNormalizationTests.cs" />
<Compile Include="Normalization\NormalizationAll.cs" />
</ItemGroup>
<ItemGroup>
<EmbeddedResource Include="IdnMapping\Data\Unicode_6_0\IdnaTest_6.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_Win7\IdnaTest_Win7.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_9_0\IdnaTest_9.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_11_0\IdnaTest_11.txt" />
<EmbeddedResource Include="IdnMapping\Data\Unicode_13_0\IdnaTest_13.txt" />
<EmbeddedResource Include="Normalization\Data\win8.txt">
<LogicalName>NormalizationDataWin8</LogicalName>
</EmbeddedResource>
<EmbeddedResource Include="Normalization\Data\win7.txt">
<LogicalName>NormalizationDataWin7</LogicalName>
</EmbeddedResource>
</ItemGroup>
</Project>
| -1 |
|
dotnet/runtime
| 66,410 |
[mono][wasm] Add changes required by emscripten 3.1.4.
|
vargaz
| 2022-03-09T20:55:40Z | 2022-03-10T00:27:01Z |
613f8e7c33cc07f87be54f1f595e2724694f25ff
|
fddee67155a2f85b0f0949fe6d520dc089e224c6
|
[mono][wasm] Add changes required by emscripten 3.1.4..
|
./src/libraries/System.Net.HttpListener/src/System/Net/Windows/WebSockets/WebSocketProtocolComponent.cs
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.IO;
using System.Runtime.InteropServices;
using System.Security;
using System.Diagnostics;
using Microsoft.Win32.SafeHandles;
namespace System.Net.WebSockets
{
internal static class WebSocketProtocolComponent
{
private static readonly string s_dummyWebsocketKeyBase64 = Convert.ToBase64String(new byte[16]);
private static readonly IntPtr s_webSocketDllHandle;
private static readonly string? s_supportedVersion;
private static readonly Interop.WebSocket.HttpHeader[] s_initialClientRequestHeaders = new Interop.WebSocket.HttpHeader[]
{
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Connection,
NameLength = (uint)HttpKnownHeaderNames.Connection.Length,
Value = HttpKnownHeaderNames.Upgrade,
ValueLength = (uint)HttpKnownHeaderNames.Upgrade.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Upgrade,
NameLength = (uint)HttpKnownHeaderNames.Upgrade.Length,
Value = HttpWebSocket.WebSocketUpgradeToken,
ValueLength = (uint)HttpWebSocket.WebSocketUpgradeToken.Length
}
};
private static readonly Interop.WebSocket.HttpHeader[]? s_ServerFakeRequestHeaders;
internal enum Action
{
NoAction = 0,
SendToNetwork = 1,
IndicateSendComplete = 2,
ReceiveFromNetwork = 3,
IndicateReceiveComplete = 4,
}
internal enum BufferType : uint
{
None = 0x00000000,
UTF8Message = 0x80000000,
UTF8Fragment = 0x80000001,
BinaryMessage = 0x80000002,
BinaryFragment = 0x80000003,
Close = 0x80000004,
PingPong = 0x80000005,
UnsolicitedPong = 0x80000006
}
internal enum PropertyType
{
ReceiveBufferSize = 0,
SendBufferSize = 1,
DisableMasking = 2,
AllocatedBuffer = 3,
DisableUtf8Verification = 4,
KeepAliveInterval = 5,
}
internal enum ActionQueue
{
Send = 1,
Receive = 2,
}
#pragma warning disable CA1810 // explicit static cctor
static WebSocketProtocolComponent()
{
s_webSocketDllHandle = Interop.Kernel32.LoadLibraryEx(Interop.Libraries.WebSocket, IntPtr.Zero, 0);
if (s_webSocketDllHandle == IntPtr.Zero)
return;
s_supportedVersion = GetSupportedVersion();
s_ServerFakeRequestHeaders = new Interop.WebSocket.HttpHeader[]
{
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Connection,
NameLength = (uint)HttpKnownHeaderNames.Connection.Length,
Value = HttpKnownHeaderNames.Upgrade,
ValueLength = (uint)HttpKnownHeaderNames.Upgrade.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Upgrade,
NameLength = (uint)HttpKnownHeaderNames.Upgrade.Length,
Value = HttpWebSocket.WebSocketUpgradeToken,
ValueLength = (uint)HttpWebSocket.WebSocketUpgradeToken.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Host,
NameLength = (uint)HttpKnownHeaderNames.Host.Length,
Value = string.Empty,
ValueLength = 0
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.SecWebSocketVersion,
NameLength = (uint)HttpKnownHeaderNames.SecWebSocketVersion.Length,
Value = s_supportedVersion,
ValueLength = (uint)s_supportedVersion.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.SecWebSocketKey,
NameLength = (uint)HttpKnownHeaderNames.SecWebSocketKey.Length,
Value = s_dummyWebsocketKeyBase64,
ValueLength = (uint)s_dummyWebsocketKeyBase64.Length
}
};
}
#pragma warning restore CA1810
internal static string SupportedVersion
{
get
{
if (!IsSupported)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
return s_supportedVersion!;
}
}
internal static bool IsSupported
{
get
{
return s_webSocketDllHandle != IntPtr.Zero;
}
}
internal static string GetSupportedVersion()
{
if (!IsSupported)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
SafeWebSocketHandle? webSocketHandle = null;
try
{
int errorCode = Interop.WebSocket.WebSocketCreateClientHandle(null!, 0, out webSocketHandle);
ThrowOnError(errorCode);
if (webSocketHandle == null ||
webSocketHandle.IsInvalid)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
IntPtr additionalHeadersPtr;
uint additionalHeaderCount;
errorCode = Interop.WebSocket.WebSocketBeginClientHandshake(webSocketHandle!,
IntPtr.Zero,
0,
IntPtr.Zero,
0,
s_initialClientRequestHeaders,
(uint)s_initialClientRequestHeaders.Length,
out additionalHeadersPtr,
out additionalHeaderCount);
ThrowOnError(errorCode);
Interop.WebSocket.HttpHeader[] additionalHeaders = MarshalHttpHeaders(additionalHeadersPtr, (int)additionalHeaderCount);
string? version = null;
foreach (Interop.WebSocket.HttpHeader header in additionalHeaders)
{
if (string.Equals(header.Name,
HttpKnownHeaderNames.SecWebSocketVersion,
StringComparison.OrdinalIgnoreCase))
{
version = header.Value;
break;
}
}
Debug.Assert(version != null, "'version' MUST NOT be NULL.");
return version;
}
finally
{
if (webSocketHandle != null)
{
webSocketHandle.Dispose();
}
}
}
internal static void WebSocketCreateServerHandle(Interop.WebSocket.Property[] properties,
int propertyCount,
out SafeWebSocketHandle webSocketHandle)
{
Debug.Assert(propertyCount >= 0, "'propertyCount' MUST NOT be negative.");
Debug.Assert((properties == null && propertyCount == 0) ||
(properties != null && propertyCount == properties.Length),
"'propertyCount' MUST MATCH 'properties.Length'.");
if (!IsSupported)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
int errorCode = Interop.WebSocket.WebSocketCreateServerHandle(properties!, (uint)propertyCount, out webSocketHandle);
ThrowOnError(errorCode);
if (webSocketHandle == null ||
webSocketHandle.IsInvalid)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
            // Currently the WSPC doesn't allow initiating a data session without
            // also being involved in the HTTP handshake, even though no information
            // from the HTTP handshake is needed by the WSPC for parsing WebSocket frames.
            // In the managed implementation the HTTP header handling is done by the
            // managed HTTP stack, so we just fake an HTTP handshake for the WSPC by
            // calling WebSocketBeginServerHandshake and WebSocketEndServerHandshake
            // with statically defined dummy headers.
errorCode = Interop.WebSocket.WebSocketBeginServerHandshake(webSocketHandle!,
IntPtr.Zero,
IntPtr.Zero,
0,
s_ServerFakeRequestHeaders!,
(uint)s_ServerFakeRequestHeaders!.Length,
out _,
out _);
ThrowOnError(errorCode);
errorCode = Interop.WebSocket.WebSocketEndServerHandshake(webSocketHandle!);
ThrowOnError(errorCode);
Debug.Assert(webSocketHandle != null, "'webSocketHandle' MUST NOT be NULL at this point.");
}
internal static void WebSocketAbortHandle(SafeHandle webSocketHandle)
{
Debug.Assert(webSocketHandle != null && !webSocketHandle.IsInvalid,
"'webSocketHandle' MUST NOT be NULL or INVALID.");
Interop.WebSocket.WebSocketAbortHandle(webSocketHandle);
DrainActionQueue(webSocketHandle, ActionQueue.Send);
DrainActionQueue(webSocketHandle, ActionQueue.Receive);
}
internal static void WebSocketDeleteHandle(IntPtr webSocketPtr)
{
Debug.Assert(webSocketPtr != IntPtr.Zero, "'webSocketPtr' MUST NOT be IntPtr.Zero.");
Interop.WebSocket.WebSocketDeleteHandle(webSocketPtr);
}
internal static void WebSocketSend(WebSocketBase webSocket,
BufferType bufferType,
Interop.WebSocket.Buffer buffer)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketSend_Raw(webSocket.SessionHandle, bufferType, ref buffer, IntPtr.Zero);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
}
internal static void WebSocketSendWithoutBody(WebSocketBase webSocket,
BufferType bufferType)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketSendWithoutBody_Raw(webSocket.SessionHandle, bufferType, IntPtr.Zero, IntPtr.Zero);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
}
internal static void WebSocketReceive(WebSocketBase webSocket)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketReceive(webSocket.SessionHandle, IntPtr.Zero, IntPtr.Zero);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
}
internal static void WebSocketGetAction(WebSocketBase webSocket,
ActionQueue actionQueue,
Interop.WebSocket.Buffer[] dataBuffers,
ref uint dataBufferCount,
out Action action,
out BufferType bufferType,
out IntPtr actionContext)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
Debug.Assert(dataBufferCount >= 0, "'dataBufferCount' MUST NOT be negative.");
Debug.Assert((dataBuffers == null && dataBufferCount == 0) ||
(dataBuffers != null && dataBufferCount == dataBuffers.Length),
"'dataBufferCount' MUST MATCH 'dataBuffers.Length'.");
action = Action.NoAction;
bufferType = BufferType.None;
actionContext = IntPtr.Zero;
IntPtr dummy;
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketGetAction(webSocket.SessionHandle,
actionQueue,
dataBuffers!,
ref dataBufferCount,
out action,
out bufferType,
out dummy,
out actionContext);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
webSocket.ValidateNativeBuffers(action, bufferType, dataBuffers!, dataBufferCount);
Debug.Assert(dataBufferCount >= 0);
Debug.Assert((dataBufferCount == 0 && dataBuffers == null) ||
(dataBufferCount <= dataBuffers!.Length));
}
internal static void WebSocketCompleteAction(WebSocketBase webSocket,
IntPtr actionContext,
int bytesTransferred)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
Debug.Assert(actionContext != IntPtr.Zero, "'actionContext' MUST NOT be IntPtr.Zero.");
Debug.Assert(bytesTransferred >= 0, "'bytesTransferred' MUST NOT be negative.");
if (webSocket.SessionHandle.IsClosed)
{
return;
}
try
{
Interop.WebSocket.WebSocketCompleteAction(webSocket.SessionHandle, actionContext, (uint)bytesTransferred);
}
catch (ObjectDisposedException)
{
}
}
private static void DrainActionQueue(SafeHandle webSocketHandle, ActionQueue actionQueue)
{
Debug.Assert(webSocketHandle != null && !webSocketHandle.IsInvalid,
"'webSocketHandle' MUST NOT be NULL or INVALID.");
IntPtr actionContext;
Action action;
while (true)
{
Interop.WebSocket.Buffer[] dataBuffers = new Interop.WebSocket.Buffer[1];
uint dataBufferCount = 1;
int errorCode = Interop.WebSocket.WebSocketGetAction(webSocketHandle,
actionQueue,
dataBuffers,
ref dataBufferCount,
out action,
out _,
out _,
out actionContext);
if (!Succeeded(errorCode))
{
Debug.Assert(errorCode == 0, "'errorCode' MUST be 0.");
return;
}
if (action == Action.NoAction)
{
return;
}
Interop.WebSocket.WebSocketCompleteAction(webSocketHandle, actionContext, 0);
}
}
private static void MarshalAndVerifyHttpHeader(IntPtr httpHeaderPtr,
ref Interop.WebSocket.HttpHeader httpHeader)
{
Debug.Assert(httpHeaderPtr != IntPtr.Zero, "'currentHttpHeaderPtr' MUST NOT be IntPtr.Zero.");
IntPtr httpHeaderNamePtr = Marshal.ReadIntPtr(httpHeaderPtr);
IntPtr lengthPtr = IntPtr.Add(httpHeaderPtr, IntPtr.Size);
int length = Marshal.ReadInt32(lengthPtr);
Debug.Assert(length >= 0, "'length' MUST NOT be negative.");
if (httpHeaderNamePtr != IntPtr.Zero)
{
httpHeader.Name = Marshal.PtrToStringAnsi(httpHeaderNamePtr, length);
}
if ((httpHeader.Name == null && length != 0) ||
(httpHeader.Name != null && length != httpHeader.Name.Length))
{
Debug.Fail("The length of 'httpHeader.Name' MUST MATCH 'length'.");
throw new AccessViolationException();
}
// structure of Interop.WebSocket.HttpHeader:
// Name = string*
// NameLength = uint*
// Value = string*
// ValueLength = uint*
// NOTE - All fields in the object are pointers to the actual value, hence the use of
// n * IntPtr.Size to get to the correct place in the object.
int valueOffset = 2 * IntPtr.Size;
int lengthOffset = 3 * IntPtr.Size;
IntPtr httpHeaderValuePtr =
Marshal.ReadIntPtr(IntPtr.Add(httpHeaderPtr, valueOffset));
lengthPtr = IntPtr.Add(httpHeaderPtr, lengthOffset);
length = Marshal.ReadInt32(lengthPtr);
httpHeader.Value = Marshal.PtrToStringAnsi(httpHeaderValuePtr, (int)length);
if ((httpHeader.Value == null && length != 0) ||
(httpHeader.Value != null && length != httpHeader.Value.Length))
{
Debug.Fail("The length of 'httpHeader.Value' MUST MATCH 'length'.");
throw new AccessViolationException();
}
}
private static Interop.WebSocket.HttpHeader[] MarshalHttpHeaders(IntPtr nativeHeadersPtr,
int nativeHeaderCount)
{
Debug.Assert(nativeHeaderCount >= 0, "'nativeHeaderCount' MUST NOT be negative.");
Debug.Assert(nativeHeadersPtr != IntPtr.Zero || nativeHeaderCount == 0,
"'nativeHeaderCount' MUST be 0.");
Interop.WebSocket.HttpHeader[] httpHeaders = new Interop.WebSocket.HttpHeader[nativeHeaderCount];
// structure of Interop.WebSocket.HttpHeader:
// Name = string*
// NameLength = uint*
// Value = string*
// ValueLength = uint*
// NOTE - All fields in the object are pointers to the actual value, hence the use of
// 4 * IntPtr.Size to get to the next header.
int httpHeaderStructSize = 4 * IntPtr.Size;
for (int i = 0; i < nativeHeaderCount; i++)
{
int offset = httpHeaderStructSize * i;
IntPtr currentHttpHeaderPtr = IntPtr.Add(nativeHeadersPtr, offset);
MarshalAndVerifyHttpHeader(currentHttpHeaderPtr, ref httpHeaders[i]);
}
Debug.Assert(httpHeaders != null);
Debug.Assert(httpHeaders.Length == nativeHeaderCount);
return httpHeaders;
}
public static bool Succeeded(int hr)
{
return (hr >= 0);
}
private static void ThrowOnError(int errorCode)
{
if (Succeeded(errorCode))
{
return;
}
throw new WebSocketException(errorCode);
}
private static void ThrowIfSessionHandleClosed(WebSocketBase webSocket)
{
if (webSocket.SessionHandle.IsClosed)
{
throw new WebSocketException(WebSocketError.InvalidState,
SR.Format(SR.net_WebSockets_InvalidState_ClosedOrAborted, webSocket.GetType().FullName, webSocket.State));
}
}
private static WebSocketException ConvertObjectDisposedException(WebSocketBase webSocket, ObjectDisposedException innerException)
{
return new WebSocketException(WebSocketError.InvalidState,
SR.Format(SR.net_WebSockets_InvalidState_ClosedOrAborted, webSocket.GetType().FullName, webSocket.State),
innerException);
}
}
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
using System.IO;
using System.Runtime.InteropServices;
using System.Security;
using System.Diagnostics;
using Microsoft.Win32.SafeHandles;
namespace System.Net.WebSockets
{
internal static class WebSocketProtocolComponent
{
private static readonly string s_dummyWebsocketKeyBase64 = Convert.ToBase64String(new byte[16]);
private static readonly IntPtr s_webSocketDllHandle;
private static readonly string? s_supportedVersion;
private static readonly Interop.WebSocket.HttpHeader[] s_initialClientRequestHeaders = new Interop.WebSocket.HttpHeader[]
{
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Connection,
NameLength = (uint)HttpKnownHeaderNames.Connection.Length,
Value = HttpKnownHeaderNames.Upgrade,
ValueLength = (uint)HttpKnownHeaderNames.Upgrade.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Upgrade,
NameLength = (uint)HttpKnownHeaderNames.Upgrade.Length,
Value = HttpWebSocket.WebSocketUpgradeToken,
ValueLength = (uint)HttpWebSocket.WebSocketUpgradeToken.Length
}
};
private static readonly Interop.WebSocket.HttpHeader[]? s_ServerFakeRequestHeaders;
internal enum Action
{
NoAction = 0,
SendToNetwork = 1,
IndicateSendComplete = 2,
ReceiveFromNetwork = 3,
IndicateReceiveComplete = 4,
}
internal enum BufferType : uint
{
None = 0x00000000,
UTF8Message = 0x80000000,
UTF8Fragment = 0x80000001,
BinaryMessage = 0x80000002,
BinaryFragment = 0x80000003,
Close = 0x80000004,
PingPong = 0x80000005,
UnsolicitedPong = 0x80000006
}
internal enum PropertyType
{
ReceiveBufferSize = 0,
SendBufferSize = 1,
DisableMasking = 2,
AllocatedBuffer = 3,
DisableUtf8Verification = 4,
KeepAliveInterval = 5,
}
internal enum ActionQueue
{
Send = 1,
Receive = 2,
}
#pragma warning disable CA1810 // explicit static cctor
static WebSocketProtocolComponent()
{
s_webSocketDllHandle = Interop.Kernel32.LoadLibraryEx(Interop.Libraries.WebSocket, IntPtr.Zero, 0);
if (s_webSocketDllHandle == IntPtr.Zero)
return;
s_supportedVersion = GetSupportedVersion();
s_ServerFakeRequestHeaders = new Interop.WebSocket.HttpHeader[]
{
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Connection,
NameLength = (uint)HttpKnownHeaderNames.Connection.Length,
Value = HttpKnownHeaderNames.Upgrade,
ValueLength = (uint)HttpKnownHeaderNames.Upgrade.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Upgrade,
NameLength = (uint)HttpKnownHeaderNames.Upgrade.Length,
Value = HttpWebSocket.WebSocketUpgradeToken,
ValueLength = (uint)HttpWebSocket.WebSocketUpgradeToken.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.Host,
NameLength = (uint)HttpKnownHeaderNames.Host.Length,
Value = string.Empty,
ValueLength = 0
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.SecWebSocketVersion,
NameLength = (uint)HttpKnownHeaderNames.SecWebSocketVersion.Length,
Value = s_supportedVersion,
ValueLength = (uint)s_supportedVersion.Length
},
new Interop.WebSocket.HttpHeader()
{
Name = HttpKnownHeaderNames.SecWebSocketKey,
NameLength = (uint)HttpKnownHeaderNames.SecWebSocketKey.Length,
Value = s_dummyWebsocketKeyBase64,
ValueLength = (uint)s_dummyWebsocketKeyBase64.Length
}
};
}
#pragma warning restore CA1810
internal static string SupportedVersion
{
get
{
if (!IsSupported)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
return s_supportedVersion!;
}
}
internal static bool IsSupported
{
get
{
return s_webSocketDllHandle != IntPtr.Zero;
}
}
internal static string GetSupportedVersion()
{
if (!IsSupported)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
SafeWebSocketHandle? webSocketHandle = null;
try
{
int errorCode = Interop.WebSocket.WebSocketCreateClientHandle(null!, 0, out webSocketHandle);
ThrowOnError(errorCode);
if (webSocketHandle == null ||
webSocketHandle.IsInvalid)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
IntPtr additionalHeadersPtr;
uint additionalHeaderCount;
errorCode = Interop.WebSocket.WebSocketBeginClientHandshake(webSocketHandle!,
IntPtr.Zero,
0,
IntPtr.Zero,
0,
s_initialClientRequestHeaders,
(uint)s_initialClientRequestHeaders.Length,
out additionalHeadersPtr,
out additionalHeaderCount);
ThrowOnError(errorCode);
Interop.WebSocket.HttpHeader[] additionalHeaders = MarshalHttpHeaders(additionalHeadersPtr, (int)additionalHeaderCount);
string? version = null;
foreach (Interop.WebSocket.HttpHeader header in additionalHeaders)
{
if (string.Equals(header.Name,
HttpKnownHeaderNames.SecWebSocketVersion,
StringComparison.OrdinalIgnoreCase))
{
version = header.Value;
break;
}
}
Debug.Assert(version != null, "'version' MUST NOT be NULL.");
return version;
}
finally
{
if (webSocketHandle != null)
{
webSocketHandle.Dispose();
}
}
}
internal static void WebSocketCreateServerHandle(Interop.WebSocket.Property[] properties,
int propertyCount,
out SafeWebSocketHandle webSocketHandle)
{
Debug.Assert(propertyCount >= 0, "'propertyCount' MUST NOT be negative.");
Debug.Assert((properties == null && propertyCount == 0) ||
(properties != null && propertyCount == properties.Length),
"'propertyCount' MUST MATCH 'properties.Length'.");
if (!IsSupported)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
int errorCode = Interop.WebSocket.WebSocketCreateServerHandle(properties!, (uint)propertyCount, out webSocketHandle);
ThrowOnError(errorCode);
if (webSocketHandle == null ||
webSocketHandle.IsInvalid)
{
HttpWebSocket.ThrowPlatformNotSupportedException_WSPC();
}
            // Currently the WSPC doesn't allow initiating a data session without
            // also being involved in the HTTP handshake, even though no information
            // from the HTTP handshake is needed by the WSPC for parsing WebSocket frames.
            // In the managed implementation the HTTP header handling is done by the
            // managed HTTP stack, so we just fake an HTTP handshake for the WSPC by
            // calling WebSocketBeginServerHandshake and WebSocketEndServerHandshake
            // with statically defined dummy headers.
errorCode = Interop.WebSocket.WebSocketBeginServerHandshake(webSocketHandle!,
IntPtr.Zero,
IntPtr.Zero,
0,
s_ServerFakeRequestHeaders!,
(uint)s_ServerFakeRequestHeaders!.Length,
out _,
out _);
ThrowOnError(errorCode);
errorCode = Interop.WebSocket.WebSocketEndServerHandshake(webSocketHandle!);
ThrowOnError(errorCode);
Debug.Assert(webSocketHandle != null, "'webSocketHandle' MUST NOT be NULL at this point.");
}
internal static void WebSocketAbortHandle(SafeHandle webSocketHandle)
{
Debug.Assert(webSocketHandle != null && !webSocketHandle.IsInvalid,
"'webSocketHandle' MUST NOT be NULL or INVALID.");
Interop.WebSocket.WebSocketAbortHandle(webSocketHandle);
DrainActionQueue(webSocketHandle, ActionQueue.Send);
DrainActionQueue(webSocketHandle, ActionQueue.Receive);
}
internal static void WebSocketDeleteHandle(IntPtr webSocketPtr)
{
Debug.Assert(webSocketPtr != IntPtr.Zero, "'webSocketPtr' MUST NOT be IntPtr.Zero.");
Interop.WebSocket.WebSocketDeleteHandle(webSocketPtr);
}
internal static void WebSocketSend(WebSocketBase webSocket,
BufferType bufferType,
Interop.WebSocket.Buffer buffer)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketSend_Raw(webSocket.SessionHandle, bufferType, ref buffer, IntPtr.Zero);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
}
internal static void WebSocketSendWithoutBody(WebSocketBase webSocket,
BufferType bufferType)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketSendWithoutBody_Raw(webSocket.SessionHandle, bufferType, IntPtr.Zero, IntPtr.Zero);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
}
internal static void WebSocketReceive(WebSocketBase webSocket)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketReceive(webSocket.SessionHandle, IntPtr.Zero, IntPtr.Zero);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
}
internal static void WebSocketGetAction(WebSocketBase webSocket,
ActionQueue actionQueue,
Interop.WebSocket.Buffer[] dataBuffers,
ref uint dataBufferCount,
out Action action,
out BufferType bufferType,
out IntPtr actionContext)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
Debug.Assert(dataBufferCount >= 0, "'dataBufferCount' MUST NOT be negative.");
Debug.Assert((dataBuffers == null && dataBufferCount == 0) ||
(dataBuffers != null && dataBufferCount == dataBuffers.Length),
"'dataBufferCount' MUST MATCH 'dataBuffers.Length'.");
action = Action.NoAction;
bufferType = BufferType.None;
actionContext = IntPtr.Zero;
IntPtr dummy;
ThrowIfSessionHandleClosed(webSocket);
int errorCode;
try
{
errorCode = Interop.WebSocket.WebSocketGetAction(webSocket.SessionHandle,
actionQueue,
dataBuffers!,
ref dataBufferCount,
out action,
out bufferType,
out dummy,
out actionContext);
}
catch (ObjectDisposedException innerException)
{
throw ConvertObjectDisposedException(webSocket, innerException);
}
ThrowOnError(errorCode);
webSocket.ValidateNativeBuffers(action, bufferType, dataBuffers!, dataBufferCount);
Debug.Assert(dataBufferCount >= 0);
Debug.Assert((dataBufferCount == 0 && dataBuffers == null) ||
(dataBufferCount <= dataBuffers!.Length));
}
internal static void WebSocketCompleteAction(WebSocketBase webSocket,
IntPtr actionContext,
int bytesTransferred)
{
Debug.Assert(webSocket != null,
"'webSocket' MUST NOT be NULL or INVALID.");
Debug.Assert(webSocket.SessionHandle != null && !webSocket.SessionHandle.IsInvalid,
"'webSocket.SessionHandle' MUST NOT be NULL or INVALID.");
Debug.Assert(actionContext != IntPtr.Zero, "'actionContext' MUST NOT be IntPtr.Zero.");
Debug.Assert(bytesTransferred >= 0, "'bytesTransferred' MUST NOT be negative.");
if (webSocket.SessionHandle.IsClosed)
{
return;
}
try
{
Interop.WebSocket.WebSocketCompleteAction(webSocket.SessionHandle, actionContext, (uint)bytesTransferred);
}
catch (ObjectDisposedException)
{
}
}
private static void DrainActionQueue(SafeHandle webSocketHandle, ActionQueue actionQueue)
{
Debug.Assert(webSocketHandle != null && !webSocketHandle.IsInvalid,
"'webSocketHandle' MUST NOT be NULL or INVALID.");
IntPtr actionContext;
Action action;
while (true)
{
Interop.WebSocket.Buffer[] dataBuffers = new Interop.WebSocket.Buffer[1];
uint dataBufferCount = 1;
int errorCode = Interop.WebSocket.WebSocketGetAction(webSocketHandle,
actionQueue,
dataBuffers,
ref dataBufferCount,
out action,
out _,
out _,
out actionContext);
if (!Succeeded(errorCode))
{
Debug.Assert(errorCode == 0, "'errorCode' MUST be 0.");
return;
}
if (action == Action.NoAction)
{
return;
}
Interop.WebSocket.WebSocketCompleteAction(webSocketHandle, actionContext, 0);
}
}
private static void MarshalAndVerifyHttpHeader(IntPtr httpHeaderPtr,
ref Interop.WebSocket.HttpHeader httpHeader)
{
Debug.Assert(httpHeaderPtr != IntPtr.Zero, "'currentHttpHeaderPtr' MUST NOT be IntPtr.Zero.");
IntPtr httpHeaderNamePtr = Marshal.ReadIntPtr(httpHeaderPtr);
IntPtr lengthPtr = IntPtr.Add(httpHeaderPtr, IntPtr.Size);
int length = Marshal.ReadInt32(lengthPtr);
Debug.Assert(length >= 0, "'length' MUST NOT be negative.");
if (httpHeaderNamePtr != IntPtr.Zero)
{
httpHeader.Name = Marshal.PtrToStringAnsi(httpHeaderNamePtr, length);
}
if ((httpHeader.Name == null && length != 0) ||
(httpHeader.Name != null && length != httpHeader.Name.Length))
{
Debug.Fail("The length of 'httpHeader.Name' MUST MATCH 'length'.");
throw new AccessViolationException();
}
// structure of Interop.WebSocket.HttpHeader:
// Name = string*
// NameLength = uint*
// Value = string*
// ValueLength = uint*
// NOTE - All fields in the object are pointers to the actual value, hence the use of
// n * IntPtr.Size to get to the correct place in the object.
int valueOffset = 2 * IntPtr.Size;
int lengthOffset = 3 * IntPtr.Size;
IntPtr httpHeaderValuePtr =
Marshal.ReadIntPtr(IntPtr.Add(httpHeaderPtr, valueOffset));
lengthPtr = IntPtr.Add(httpHeaderPtr, lengthOffset);
length = Marshal.ReadInt32(lengthPtr);
httpHeader.Value = Marshal.PtrToStringAnsi(httpHeaderValuePtr, (int)length);
if ((httpHeader.Value == null && length != 0) ||
(httpHeader.Value != null && length != httpHeader.Value.Length))
{
Debug.Fail("The length of 'httpHeader.Value' MUST MATCH 'length'.");
throw new AccessViolationException();
}
}
private static Interop.WebSocket.HttpHeader[] MarshalHttpHeaders(IntPtr nativeHeadersPtr,
int nativeHeaderCount)
{
Debug.Assert(nativeHeaderCount >= 0, "'nativeHeaderCount' MUST NOT be negative.");
Debug.Assert(nativeHeadersPtr != IntPtr.Zero || nativeHeaderCount == 0,
"'nativeHeaderCount' MUST be 0.");
Interop.WebSocket.HttpHeader[] httpHeaders = new Interop.WebSocket.HttpHeader[nativeHeaderCount];
// structure of Interop.WebSocket.HttpHeader:
// Name = string*
// NameLength = uint*
// Value = string*
// ValueLength = uint*
// NOTE - All fields in the object are pointers to the actual value, hence the use of
// 4 * IntPtr.Size to get to the next header.
int httpHeaderStructSize = 4 * IntPtr.Size;
for (int i = 0; i < nativeHeaderCount; i++)
{
int offset = httpHeaderStructSize * i;
IntPtr currentHttpHeaderPtr = IntPtr.Add(nativeHeadersPtr, offset);
MarshalAndVerifyHttpHeader(currentHttpHeaderPtr, ref httpHeaders[i]);
}
Debug.Assert(httpHeaders != null);
Debug.Assert(httpHeaders.Length == nativeHeaderCount);
return httpHeaders;
}
public static bool Succeeded(int hr)
{
return (hr >= 0);
}
private static void ThrowOnError(int errorCode)
{
if (Succeeded(errorCode))
{
return;
}
throw new WebSocketException(errorCode);
}
private static void ThrowIfSessionHandleClosed(WebSocketBase webSocket)
{
if (webSocket.SessionHandle.IsClosed)
{
throw new WebSocketException(WebSocketError.InvalidState,
SR.Format(SR.net_WebSockets_InvalidState_ClosedOrAborted, webSocket.GetType().FullName, webSocket.State));
}
}
private static WebSocketException ConvertObjectDisposedException(WebSocketBase webSocket, ObjectDisposedException innerException)
{
return new WebSocketException(WebSocketError.InvalidState,
SR.Format(SR.net_WebSockets_InvalidState_ClosedOrAborted, webSocket.GetType().FullName, webSocket.State),
innerException);
}
}
}
| -1 |
|
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions on ARM64 when the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
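As a rough scalar sketch (not taken from the PR itself; the method name and the fixed divisor of 16 are made up for illustration), the `csneg`-based sequence above computes the same result as the following C# for a signed power-of-2 divisor:
```csharp
// Hypothetical illustration of what the optimized ARM64 sequence computes for a % 16.
// Assumes unchecked arithmetic (the C# default), mirroring the and/negs/and/csneg pattern.
static int Mod16(int a)
{
    int positiveBits = a & 15;   // remainder when the dividend is non-negative
    int negatedBits = (-a) & 15; // remainder magnitude when the dividend is negative
    return a >= 0 ? positiveBits : -negatedBits;
}
```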
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions on ARM64 when the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/codegen.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This class contains all the data & functionality for code generation
// of a method, except for the target-specific elements, which are
// primarily in the Target class.
//
#ifndef _CODEGEN_H_
#define _CODEGEN_H_
#include "codegeninterface.h"
#include "compiler.h" // temporary??
#include "regset.h"
#include "jitgcinfo.h"
class CodeGen final : public CodeGenInterface
{
friend class emitter;
friend class DisAssembler;
public:
// This could use further abstraction
CodeGen(Compiler* theCompiler);
virtual void genGenerateCode(void** codePtr, uint32_t* nativeSizeOfCode);
void genGenerateMachineCode();
void genEmitMachineCode();
void genEmitUnwindDebugGCandEH();
// TODO-Cleanup: Abstract out the part of this that finds the addressing mode, and
// move it to Lower
virtual bool genCreateAddrMode(
GenTree* addr, bool fold, bool* revPtr, GenTree** rv1Ptr, GenTree** rv2Ptr, unsigned* mulPtr, ssize_t* cnsPtr);
private:
#if defined(TARGET_XARCH)
// Bit masks used in negating a float or double number.
// This is to avoid creating more than one data constant for these bitmasks when a
// method has more than one GT_NEG operation on floating point values.
CORINFO_FIELD_HANDLE negBitmaskFlt;
CORINFO_FIELD_HANDLE negBitmaskDbl;
// Bit masks used in computing Math.Abs() of a float or double number.
CORINFO_FIELD_HANDLE absBitmaskFlt;
CORINFO_FIELD_HANDLE absBitmaskDbl;
// Bit mask used in U8 -> double conversion to adjust the result.
CORINFO_FIELD_HANDLE u8ToDblBitmask;
// Generates SSE2 code for the given tree as "Operand BitWiseOp BitMask"
void genSSE2BitwiseOp(GenTree* treeNode);
// Generates SSE41 code for the given tree as a round operation
void genSSE41RoundOp(GenTreeOp* treeNode);
instruction simdAlignedMovIns()
{
// We use movaps when non-VEX because it is a smaller instruction;
// however the VEX version vmovaps would be used which is the same size as vmovdqa;
// also vmovdqa has more available CPU ports on older processors so we switch to that
return compiler->canUseVexEncoding() ? INS_movdqa : INS_movaps;
}
instruction simdUnalignedMovIns()
{
// We use movups when non-VEX because it is a smaller instruction;
// however the VEX version vmovups would be used which is the same size as vmovdqu;
// but vmovdqu has more available CPU ports on older processors so we switch to that
return compiler->canUseVexEncoding() ? INS_movdqu : INS_movups;
}
#endif // defined(TARGET_XARCH)
void genPrepForCompiler();
void genMarkLabelsForCodegen();
inline RegState* regStateForType(var_types t)
{
return varTypeUsesFloatReg(t) ? &floatRegState : &intRegState;
}
inline RegState* regStateForReg(regNumber reg)
{
return genIsValidFloatReg(reg) ? &floatRegState : &intRegState;
}
regNumber genFramePointerReg()
{
if (isFramePointerUsed())
{
return REG_FPBASE;
}
else
{
return REG_SPBASE;
}
}
static bool genShouldRoundFP();
static GenTreeIndir indirForm(var_types type, GenTree* base);
static GenTreeStoreInd storeIndirForm(var_types type, GenTree* base, GenTree* data);
GenTreeIntCon intForm(var_types type, ssize_t value);
void genRangeCheck(GenTree* node);
void genLockedInstructions(GenTreeOp* node);
#ifdef TARGET_XARCH
void genCodeForLockAdd(GenTreeOp* node);
#endif
#ifdef REG_OPT_RSVD
// On some targets such as the ARM we may need to have an extra reserved register
// that is used when addressing stack based locals and stack based temps.
// This method returns the regNumber that should be used when an extra register
// is needed to access the stack based locals and stack based temps.
//
regNumber rsGetRsvdReg()
{
// We should have already added this register to the mask
// of reserved registers in regSet.rdMaskResvd
noway_assert((regSet.rsMaskResvd & RBM_OPT_RSVD) != 0);
return REG_OPT_RSVD;
}
#endif // REG_OPT_RSVD
//-------------------------------------------------------------------------
bool genUseBlockInit; // true if we plan to block-initialize the local stack frame
unsigned genInitStkLclCnt; // The count of local variables that we need to zero init
void SubtractStackLevel(unsigned adjustment)
{
assert(genStackLevel >= adjustment);
unsigned newStackLevel = genStackLevel - adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void AddStackLevel(unsigned adjustment)
{
unsigned newStackLevel = genStackLevel + adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void SetStackLevel(unsigned newStackLevel)
{
if (genStackLevel != newStackLevel)
{
JITDUMP("Setting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
//-------------------------------------------------------------------------
void genReportEH();
// Allocates storage for the GC info, writes the GC info into that storage, records the address of the
// GC info of the method with the EE, and returns a pointer to the "info" portion (just post-header) of
// the GC info. Requires "codeSize" to be the size of the generated code, "prologSize" and "epilogSize"
// to be the sizes of the prolog and epilog, respectively. In DEBUG, makes a check involving the
// "codePtr", assumed to be a pointer to the start of the generated code.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef JIT32_GCENCODER
void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void* genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr));
#else // !JIT32_GCENCODER
void genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr));
#endif // !JIT32_GCENCODER
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
// the current (pending) label ref, a label which has been referenced but not yet seen
BasicBlock* genPendingCallLabel;
void** codePtr;
uint32_t* nativeSizeOfCode;
unsigned codeSize;
void* coldCodePtr;
void* consPtr;
#ifdef DEBUG
// Last instr we have displayed for dspInstrs
unsigned genCurDispOffset;
static const char* genInsName(instruction ins);
const char* genInsDisplayName(emitter::instrDesc* id);
static const char* genSizeStr(emitAttr size);
#endif // DEBUG
void genInitialize();
void genInitializeRegisterState();
void genCodeForBBlist();
public:
void genSpillVar(GenTree* tree);
protected:
void genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTarget = REG_NA);
void genGCWriteBarrier(GenTree* tgt, GCInfo::WriteBarrierForm wbf);
BasicBlock* genCreateTempLabel();
private:
void genLogLabel(BasicBlock* bb);
protected:
void genDefineTempLabel(BasicBlock* label);
void genDefineInlineTempLabel(BasicBlock* label);
void genAdjustStackLevel(BasicBlock* block);
void genExitCode(BasicBlock* block);
void genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk = nullptr);
#ifdef TARGET_LOONGARCH64
void genSetRegToIcon(regNumber reg, ssize_t val, var_types type);
void genJumpToThrowHlpBlk_la(SpecialCodeKind codeKind,
instruction ins,
regNumber reg1,
BasicBlock* failBlk = nullptr,
regNumber reg2 = REG_R0);
#else
void genCheckOverflow(GenTree* tree);
#endif
//-------------------------------------------------------------------------
//
// Prolog/epilog generation
//
//-------------------------------------------------------------------------
unsigned prologSize;
unsigned epilogSize;
//
// Prolog functions and data (there are a few exceptions for more generally used things)
//
void genEstablishFramePointer(int delta, bool reportUnwindData);
#if defined(TARGET_LOONGARCH64)
void genFnPrologCalleeRegArgs();
#else
void genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState);
#endif
void genEnregisterIncomingStackArgs();
#if defined(TARGET_ARM64)
void genEnregisterOSRArgsAndLocals(regNumber initReg, bool* pInitRegZeroed);
#else
void genEnregisterOSRArgsAndLocals();
#endif
void genCheckUseBlockInit();
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void genClearStackVec3ArgUpperBits();
#endif // UNIX_AMD64_ABI && FEATURE_SIMD
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
bool genInstrWithConstant(instruction ins,
emitAttr attr,
regNumber reg1,
regNumber reg2,
ssize_t imm,
regNumber tmpReg,
bool inUnwindRegion = false);
void genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg, bool* pTmpRegIsZero, bool reportUnwindData);
void genPrologSaveRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
void genEpilogRestoreRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
// A simple struct to keep register pairs for prolog and epilog.
struct RegPair
{
regNumber reg1;
regNumber reg2;
bool useSaveNextPair;
RegPair(regNumber reg1) : reg1(reg1), reg2(REG_NA), useSaveNextPair(false)
{
}
RegPair(regNumber reg1, regNumber reg2) : reg1(reg1), reg2(reg2), useSaveNextPair(false)
{
assert(reg2 == REG_NEXT(reg1));
}
};
static void genBuildRegPairsStack(regMaskTP regsMask, ArrayStack<RegPair>* regStack);
static void genSetUseSaveNextPairs(ArrayStack<RegPair>* regStack);
static int genGetSlotSizeForRegsInMask(regMaskTP regsMask);
void genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genRestoreCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta);
void genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta);
void genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed);
#else
void genPushCalleeSavedRegisters();
#endif
#if defined(TARGET_AMD64)
void genOSRRecordTier0CalleeSavedRegistersAndFrame();
void genOSRSaveRemainingCalleeSavedRegisters();
#endif // TARGET_AMD64
void genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn);
void genPoisonFrame(regMaskTP bbRegLiveIn);
#if defined(TARGET_ARM)
bool genInstrWithConstant(
instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insFlags flags, regNumber tmpReg);
bool genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg);
void genPushFltRegs(regMaskTP regMask);
void genPopFltRegs(regMaskTP regMask);
regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat);
regMaskTP genJmpCallArgMask();
void genFreeLclFrame(unsigned frameSize,
/* IN OUT */ bool* pUnwindStarted);
void genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg);
void genMov32RelocatableDataLabel(unsigned value, regNumber reg);
void genMov32RelocatableImmediate(emitAttr size, BYTE* addr, regNumber reg);
bool genUsedPopToReturn; // True if we use the pop into PC to return,
// False if we didn't and must branch to LR to return.
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of registers saved in the funclet prolog (includes LR)
unsigned fiFunctionCallerSPtoFPdelta; // Delta between caller SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
unsigned fiPSP_slot_SP_offset; // PSP slot offset from SP
int fiPSP_slot_CallerSP_offset; // PSP slot offset from Caller SP
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_ARM64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes LR)
int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function
// (negative)
int fiSP_to_FPLR_save_delta; // FP/LR register save offset from SP (positive)
int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
int fiSP_to_CalleeSave_delta; // First callee-saved register slot offset from SP (positive)
int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
int fiSpDelta1; // Stack pointer delta 1 (negative)
int fiSpDelta2; // Stack pointer delta 2 (negative)
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_AMD64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
unsigned fiFunction_InitialSP_to_FP_delta; // Delta between Initial-SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
int fiPSP_slot_InitialSP_offset; // PSP slot offset from Initial-SP
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_LOONGARCH64)
// A set of information that is used by funclet prolog and epilog generation.
// It is collected once, before funclet prologs and epilogs are generated,
// and used by all funclet prologs and epilogs, which must all be the same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes RA)
int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function
// (negative)
int fiSP_to_FPRA_save_delta; // FP/RA register save offset from SP (positive)
int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
int fiSpDelta1; // Stack pointer delta 1 (negative)
};
FuncletFrameInfoDsc genFuncletInfo;
#endif // TARGET_LOONGARCH64
#if defined(TARGET_XARCH)
// Save/Restore callee saved float regs to stack
void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize);
void genRestoreCalleeSavedFltRegs(unsigned lclFrameSize);
// Generate VZeroupper instruction to avoid AVX/SSE transition penalty
void genVzeroupperIfNeeded(bool check256bitOnly = true);
#endif // TARGET_XARCH
void genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg);
regNumber genGetZeroReg(regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed);
void genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed);
void genFinalizeFrame();
#ifdef PROFILING_SUPPORTED
void genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed);
void genProfilingLeaveCallback(unsigned helper);
#endif // PROFILING_SUPPORTED
// clang-format off
void genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
void* addr
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
regNumber base,
bool isJump);
// clang-format on
// clang-format off
void genEmitCallIndir(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
GenTreeIndir* indir
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
bool isJump);
// clang-format on
//
// Epilog functions
//
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog);
#endif
#if defined(TARGET_ARM64)
void genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog);
#else // !defined(TARGET_ARM64)
void genPopCalleeSavedRegisters(bool jmpEpilog = false);
#if defined(TARGET_XARCH)
unsigned genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs);
#endif // defined(TARGET_XARCH)
#endif // !defined(TARGET_ARM64)
//
// Common or driving functions
//
void genReserveProlog(BasicBlock* block); // currently unused
void genReserveEpilog(BasicBlock* block);
void genFnProlog();
void genFnEpilog(BasicBlock* block);
#if defined(FEATURE_EH_FUNCLETS)
void genReserveFuncletProlog(BasicBlock* block);
void genReserveFuncletEpilog(BasicBlock* block);
void genFuncletProlog(BasicBlock* block);
void genFuncletEpilog();
void genCaptureFuncletPrologEpilogInfo();
/*-----------------------------------------------------------------------------
*
* Set the main function PSPSym value in the frame.
* Funclets use different code to load the PSP sym and save it in their frame.
* See the document "CLR ABI.md" for a full description of the PSPSym.
* The PSPSym section of that document is copied here.
*
***********************************
* The name PSPSym stands for Previous Stack Pointer Symbol. It is how a funclet
* accesses locals from the main function body.
*
* First, two definitions.
*
* Caller-SP is the value of the stack pointer in a function's caller before the call
* instruction is executed. That is, when function A calls function B, Caller-SP for B
* is the value of the stack pointer immediately before the call instruction in A
* (calling B) was executed. Note that this definition holds for both AMD64, which
* pushes the return value when a call instruction is executed, and for ARM, which
* doesn't. For AMD64, Caller-SP is the address above the call return address.
*
* Initial-SP is the initial value of the stack pointer after the fixed-size portion of
* the frame has been allocated. That is, before any "alloca"-type allocations.
*
* The PSPSym is a pointer-sized local variable in the frame of the main function and
* of each funclet. The value stored in PSPSym is the value of Initial-SP/Caller-SP
* for the main function. The stack offset of the PSPSym is reported to the VM in the
* GC information header. The value reported in the GC information is the offset of the
* PSPSym from Initial-SP/Caller-SP. (Note that both the value stored, and the way the
* value is reported to the VM, differs between architectures. In particular, note that
* most things in the GC information header are reported as offsets relative to Caller-SP,
* but PSPSym on AMD64 is one (maybe the only) exception.)
*
* The VM uses the PSPSym to find other locals it cares about (such as the generics context
* in a funclet frame). The JIT uses it to re-establish the frame pointer register, so that
* the frame pointer is the same value in a funclet as it is in the main function body.
*
* When a funclet is called, it is passed the Establisher Frame Pointer. For AMD64 this is
* true for all funclets and it is passed as the first argument in RCX, but for ARM this is
* only true for first pass funclets (currently just filters) and it is passed as the second
* argument in R1. The Establisher Frame Pointer is a stack pointer of an interesting "parent"
* frame in the exception processing system. For the CLR, it points either to the main function
* frame or a dynamically enclosing funclet frame from the same function, for the funclet being
* invoked. The value of the Establisher Frame Pointer is Initial-SP on AMD64, Caller-SP on ARM.
*
* Using the establisher frame, the funclet wants to load the value of the PSPSym. Since we
* don't know if the Establisher Frame is from the main function or a funclet, we design the
* main function and funclet frame layouts to place the PSPSym at an identical, small, constant
* offset from the Establisher Frame in each case. (This is also required because we only report
* a single offset to the PSPSym in the GC information, and that offset must be valid for the main
* function and all of its funclets). Then, the funclet uses this known offset to compute the
* PSPSym address and read its value. From this, it can compute the value of the frame pointer
* (which is a constant offset from the PSPSym value) and set the frame register to be the same
* as the parent function. Also, the funclet writes the value of the PSPSym to its own frame's
* PSPSym. This "copying" of the PSPSym happens for every funclet invocation, in particular,
* for every nested funclet invocation.
*
* On ARM, for all second pass funclets (finally, fault, catch, and filter-handler) the VM
* restores all non-volatile registers to their values within the parent frame. This includes
* the frame register (R11). Thus, the PSPSym is not used to recompute the frame pointer register
* in this case, though the PSPSym is copied to the funclet's frame, as for all funclets.
*
* Catch, Filter, and Filter-handlers also get an Exception object (GC ref) as an argument
* (REG_EXCEPTION_OBJECT). On AMD64 it is the second argument and thus passed in RDX. On
* ARM this is the first argument and passed in R0.
*
* (Note that the JIT64 source code contains a comment that says, "The current CLR doesn't always
* pass the correct establisher frame to the funclet. Funclet may receive establisher frame of
* funclet when expecting that of original routine." It indicates this is the reason that a PSPSym
* is required in all funclets as well as the main function, whereas if the establisher frame was
* correctly reported, the PSPSym could be omitted in some cases.)
***********************************
*/
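// As an illustrative sketch only (register names and the offset/delta names below are hypothetical
// and vary by target and frame type), a funclet prolog consumes the PSPSym roughly as follows:
//
//     ; establisherFrameReg = Establisher Frame passed by the VM (e.g. RCX on AMD64)
//     mov  tmpReg, [establisherFrameReg + pspSymOffset]   ; load the main function's PSPSym value
//     mov  [sp + funcletPSPSymOffset], tmpReg             ; copy it into this funclet's PSP slot
//     lea  fp, [tmpReg + callerSPtoFPdelta]               ; re-establish the parent frame pointer
//
// The key invariant, described above, is that "pspSymOffset" is the same small constant whether the
// establisher frame belongs to the main function or to a dynamically enclosing funclet.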
void genSetPSPSym(regNumber initReg, bool* pInitRegZeroed);
void genUpdateCurrentFunclet(BasicBlock* block);
#if defined(TARGET_ARM)
void genInsertNopForUnwinder(BasicBlock* block);
#endif
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
#endif // !FEATURE_EH_FUNCLETS
void genGeneratePrologsAndEpilogs();
#if defined(DEBUG) && defined(TARGET_ARM64)
void genArm64EmitterUnitTests();
#endif
#if defined(DEBUG) && defined(TARGET_LOONGARCH64)
void genLoongArch64EmitterUnitTests();
#endif
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void genAmd64EmitterUnitTests();
#endif
#ifdef TARGET_ARM64
virtual void SetSaveFpLrWithAllCalleeSavedRegisters(bool value);
virtual bool IsSaveFpLrWithAllCalleeSavedRegisters() const;
bool genSaveFpLrWithAllCalleeSavedRegisters;
#endif // TARGET_ARM64
//-------------------------------------------------------------------------
//
// End prolog/epilog generation
//
//-------------------------------------------------------------------------
void genSinglePush();
void genSinglePop();
regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs);
void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Debugging Support XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#ifdef DEBUG
void genIPmappingDisp(unsigned mappingNum, IPmappingDsc* ipMapping);
void genIPmappingListDisp();
#endif // DEBUG
void genIPmappingAdd(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingGen();
#ifdef DEBUG
void genDumpPreciseDebugInfo();
void genDumpPreciseDebugInfoInlineTree(FILE* file, InlineContext* context, bool* first);
void genAddPreciseIPMappingHere(const DebugInfo& di);
#endif
void genEnsureCodeEmitted(const DebugInfo& di);
//-------------------------------------------------------------------------
// scope info for the variables
void genSetScopeInfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
unsigned LVnum,
bool avail,
siVarLoc* varLoc);
void genSetScopeInfo();
#ifdef USING_VARIABLE_LIVE_RANGE
// Send VariableLiveRanges as debug info to the debugger
void genSetScopeInfoUsingVariableRanges();
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
void genSetScopeInfoUsingsiScope();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ScopeInfo XX
XX XX
XX Keeps track of the scopes during code-generation. XX
XX This is used to translate the local-variable debugging information XX
XX from IL offsets to native code offsets. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
/*****************************************************************************
* ScopeInfo
*
* This class is called during code gen at block-boundaries, and when the
* set of live variables changes. It keeps track of the scope of the variables
* in terms of the native code PC.
*/
#endif // USING_SCOPE_INFO
public:
void siInit();
void checkICodeDebugInfo();
// The logic used to report debug info on debug code is the same for ScopeInfo and
// VariableLiveRange
void siBeginBlock(BasicBlock* block);
void siEndBlock(BasicBlock* block);
// VariableLiveRange and siScope need this method to report variables on debug code
void siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset);
protected:
#if defined(FEATURE_EH_FUNCLETS)
bool siInFuncletRegion; // Have we seen the start of the funclet region?
#endif // FEATURE_EH_FUNCLETS
IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed
#ifdef USING_SCOPE_INFO
public:
// Closes the "ScopeInfo" of the tracked variables that has become dead.
virtual void siUpdate();
void siCheckVarScope(unsigned varNum, IL_OFFSET offs);
void siCloseAllOpenScopes();
#ifdef DEBUG
void siDispOpenScopes();
#endif
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct siScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scVarNum; // index into lvaTable
unsigned scLVnum; // 'which' in eeGetLVinfo()
unsigned scStackLevel; // Only for stk-vars
siScope* scPrev;
siScope* scNext;
};
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// varDsc and scope description.
CodeGenInterface::siVarLoc getSiVarLoc(const LclVarDsc* varDsc, const siScope* scope) const;
siScope siOpenScopeList, siScopeList, *siOpenScopeLast, *siScopeLast;
unsigned siScopeCnt;
VARSET_TP siLastLife; // Life at last call to siUpdate()
// Tracks the last entry for each tracked register variable
siScope** siLatestTrackedScopes;
// Functions
siScope* siNewScope(unsigned LVnum, unsigned varNum);
void siRemoveFromOpenScopeList(siScope* scope);
void siEndTrackedScope(unsigned varIndex);
void siEndScope(unsigned varNum);
void siEndScope(siScope* scope);
#ifdef DEBUG
bool siVerifyLocalVarTab();
#endif
#ifdef LATE_DISASM
public:
/* virtual */
const char* siRegVarName(size_t offs, size_t size, unsigned reg);
/* virtual */
const char* siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs);
#endif // LATE_DISASM
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX PrologScopeInfo XX
XX XX
XX We need special handling in the prolog block, as the parameter variables XX
XX may not be in the same position described by genLclVarTable - they all XX
XX start out on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#endif // USING_SCOPE_INFO
public:
void psiBegProlog();
void psiEndProlog();
#ifdef USING_SCOPE_INFO
void psiAdjustStackLevel(unsigned size);
// For EBP-frames, the parameters are accessed via ESP on entry to the function,
// but via EBP right after a "mov ebp,esp" instruction.
void psiMoveESPtoEBP();
// Close previous psiScope and open a new one on the location described by the registers.
void psiMoveToReg(unsigned varNum, regNumber reg = REG_NA, regNumber otherReg = REG_NA);
// Search the open "psiScope" of the "varNum" parameter, close it and open
// a new one using "LclVarDsc" fields.
void psiMoveToStack(unsigned varNum);
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct psiScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scSlotNum; // index into lclVarTab
unsigned scLVnum; // 'which' in eeGetLVinfo()
bool scRegister;
union {
struct
{
regNumberSmall scRegNum;
// Used for:
// - "other half" of long var on architectures with 32 bit size registers - x86.
// - for System V structs it stores the second register
// used to pass a register passed struct.
regNumberSmall scOtherReg;
} u1;
struct
{
regNumberSmall scBaseReg;
NATIVE_OFFSET scOffset;
} u2;
};
psiScope* scPrev;
psiScope* scNext;
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// psiScope properties.
CodeGenInterface::siVarLoc getSiVarLoc() const;
};
psiScope psiOpenScopeList, psiScopeList, *psiOpenScopeLast, *psiScopeLast;
unsigned psiScopeCnt;
// Implementation Functions
psiScope* psiNewPrologScope(unsigned LVnum, unsigned slotNum);
void psiEndPrologScope(psiScope* scope);
void psiSetScopeOffset(psiScope* newScope, const LclVarDsc* lclVarDsc) const;
#endif // USING_SCOPE_INFO
NATIVE_OFFSET psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const;
/*****************************************************************************
* TrnslLocalVarInfo
*
* This struct holds the LocalVarInfo in terms of the generated native code
* after a call to genSetScopeInfo()
*/
protected:
#ifdef DEBUG
struct TrnslLocalVarInfo
{
unsigned tlviVarNum;
unsigned tlviLVnum;
VarName tlviName;
UNATIVE_OFFSET tlviStartPC;
size_t tlviLength;
bool tlviAvailable;
siVarLoc tlviVarLoc;
};
// Array of scopes of LocalVars in terms of native code
TrnslLocalVarInfo* genTrnslLocalVarInfo;
unsigned genTrnslLocalVarCount;
#endif
void genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree);
void genCodeForTreeNode(GenTree* treeNode);
void genCodeForBinary(GenTreeOp* treeNode);
#if defined(TARGET_X86)
void genCodeForLongUMod(GenTreeOp* node);
#endif // TARGET_X86
void genCodeForDivMod(GenTreeOp* treeNode);
void genCodeForMul(GenTreeOp* treeNode);
void genCodeForIncSaturate(GenTree* treeNode);
void genCodeForMulHi(GenTreeOp* treeNode);
void genLeaInstruction(GenTreeAddrMode* lea);
void genSetRegToCond(regNumber dstReg, GenTree* tree);
#if defined(TARGET_ARMARCH)
void genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale);
void genCodeForMulLong(GenTreeOp* mul);
#endif // TARGET_ARMARCH
#if !defined(TARGET_64BIT)
void genLongToIntCast(GenTree* treeNode);
#endif
// Generate code for a GT_BITCAST that is not contained.
void genCodeForBitCast(GenTreeOp* treeNode);
// Generate the instruction to move a value between register files
void genBitCast(var_types targetType, regNumber targetReg, var_types srcType, regNumber srcReg);
struct GenIntCastDesc
{
enum CheckKind
{
CHECK_NONE,
CHECK_SMALL_INT_RANGE,
CHECK_POSITIVE,
#ifdef TARGET_64BIT
CHECK_UINT_RANGE,
CHECK_POSITIVE_INT_RANGE,
CHECK_INT_RANGE,
#endif
};
enum ExtendKind
{
COPY,
ZERO_EXTEND_SMALL_INT,
SIGN_EXTEND_SMALL_INT,
#ifdef TARGET_64BIT
ZERO_EXTEND_INT,
SIGN_EXTEND_INT,
#endif
};
private:
CheckKind m_checkKind;
unsigned m_checkSrcSize;
int m_checkSmallIntMin;
int m_checkSmallIntMax;
ExtendKind m_extendKind;
unsigned m_extendSrcSize;
public:
GenIntCastDesc(GenTreeCast* cast);
CheckKind CheckKind() const
{
return m_checkKind;
}
unsigned CheckSrcSize() const
{
assert(m_checkKind != CHECK_NONE);
return m_checkSrcSize;
}
int CheckSmallIntMin() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMin;
}
int CheckSmallIntMax() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMax;
}
ExtendKind ExtendKind() const
{
return m_extendKind;
}
unsigned ExtendSrcSize() const
{
return m_extendSrcSize;
}
};
void genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg);
void genIntToIntCast(GenTreeCast* cast);
void genFloatToFloatCast(GenTree* treeNode);
void genFloatToIntCast(GenTree* treeNode);
void genIntToFloatCast(GenTree* treeNode);
void genCkfinite(GenTree* treeNode);
void genCodeForCompare(GenTreeOp* tree);
void genIntrinsic(GenTree* treeNode);
void genPutArgStk(GenTreePutArgStk* treeNode);
void genPutArgReg(GenTreeOp* tree);
#if FEATURE_ARG_SPLIT
void genPutArgSplit(GenTreePutArgSplit* treeNode);
#endif // FEATURE_ARG_SPLIT
#if defined(TARGET_XARCH)
unsigned getBaseVarForPutArgStk(GenTree* treeNode);
#endif // TARGET_XARCH
unsigned getFirstArgWithStackSlot();
void genCompareFloat(GenTree* treeNode);
void genCompareInt(GenTree* treeNode);
#ifdef FEATURE_SIMD
enum SIMDScalarMoveType{
SMT_ZeroInitUpper, // zero initialize target upper bits
SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
SMT_PreserveUpper // preserve target upper bits
};
#ifdef TARGET_ARM64
insOpts genGetSimdInsOpt(emitAttr size, var_types elementType);
#endif
instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival = nullptr);
void genSIMDScalarMove(
var_types targetType, var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
void genSIMDLo64BitConvert(SIMDIntrinsicID intrinsicID,
var_types simdType,
var_types baseType,
regNumber tmpReg,
regNumber tmpIntReg,
regNumber targetReg);
void genSIMDIntrinsic32BitConvert(GenTreeSIMD* simdNode);
void genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode);
void genSIMDExtractUpperHalf(GenTreeSIMD* simdNode, regNumber srcReg, regNumber tgtReg);
void genSIMDIntrinsic(GenTreeSIMD* simdNode);
// TYP_SIMD12 (i.e Vector3 of size 12 bytes) is not a hardware supported size and requires
// two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
// values through an indirection. Note that Vector3 locals allocated on stack would have
// their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
// Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
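// As an illustrative sketch (the xmm/gp register names are hypothetical and the exact instructions
// are target-specific), an x64 store of a TYP_SIMD12 value through an indirection is split into an
// 8-byte write of elements 0-1 followed by a 4-byte write of element 2:
//
//     movsd   qword ptr [rax], xmm0       ; store the lower 8 bytes
//     pshufd  xmm1, xmm0, 2               ; move element 2 into the low lane of a temp register
//     movss   dword ptr [rax+8], xmm1     ; store the upper 4 bytes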
void genStoreIndTypeSIMD12(GenTree* treeNode);
void genLoadIndTypeSIMD12(GenTree* treeNode);
void genStoreLclTypeSIMD12(GenTree* treeNode);
void genLoadLclTypeSIMD12(GenTree* treeNode);
#ifdef TARGET_X86
void genStoreSIMD12ToStack(regNumber operandReg, regNumber tmpReg);
void genPutArgStkSIMD12(GenTree* treeNode);
#endif // TARGET_X86
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void genHWIntrinsic(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
void genHWIntrinsic_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber reg, GenTree* rmOp);
void genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTree* op2);
void genHWIntrinsic_R_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM_R(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_R_RM(
instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTree* op3);
void genBaseIntrinsic(GenTreeHWIntrinsic* node);
void genX86BaseIntrinsic(GenTreeHWIntrinsic* node);
void genSSEIntrinsic(GenTreeHWIntrinsic* node);
void genSSE2Intrinsic(GenTreeHWIntrinsic* node);
void genSSE41Intrinsic(GenTreeHWIntrinsic* node);
void genSSE42Intrinsic(GenTreeHWIntrinsic* node);
void genAvxOrAvx2Intrinsic(GenTreeHWIntrinsic* node);
void genAESIntrinsic(GenTreeHWIntrinsic* node);
void genBMI1OrBMI2Intrinsic(GenTreeHWIntrinsic* node);
void genFMAIntrinsic(GenTreeHWIntrinsic* node);
void genLZCNTIntrinsic(GenTreeHWIntrinsic* node);
void genPCLMULQDQIntrinsic(GenTreeHWIntrinsic* node);
void genPOPCNTIntrinsic(GenTreeHWIntrinsic* node);
void genXCNTIntrinsic(GenTreeHWIntrinsic* node, instruction ins);
template <typename HWIntrinsicSwitchCaseBody>
void genHWIntrinsicJumpTableFallback(NamedIntrinsic intrinsic,
regNumber nonConstImmReg,
regNumber baseReg,
regNumber offsReg,
HWIntrinsicSwitchCaseBody emitSwCase);
#endif // defined(TARGET_XARCH)
#ifdef TARGET_ARM64
class HWIntrinsicImmOpHelper final
{
public:
HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin);
void EmitBegin();
void EmitCaseEnd();
// Returns true after the last call to EmitCaseEnd() (i.e. this signals that code generation is done).
bool Done() const
{
return (immValue > immUpperBound);
}
// Returns a value of the immediate operand that should be used for a case.
int ImmValue() const
{
return immValue;
}
private:
// Returns true if immOp is a non-contained immediate (i.e. the value of the immediate operand is enregistered in
// nonConstImmReg).
bool NonConstImmOp() const
{
return nonConstImmReg != REG_NA;
}
// Returns true if a non-constant immediate operand can be either 0 or 1.
bool TestImmOpZeroOrOne() const
{
assert(NonConstImmOp());
return (immLowerBound == 0) && (immUpperBound == 1);
}
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
CodeGen* const codeGen;
BasicBlock* endLabel;
BasicBlock* nonZeroLabel;
int immValue;
int immLowerBound;
int immUpperBound;
regNumber nonConstImmReg;
regNumber branchTargetReg;
};
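// Illustrative usage sketch for the helper above (the intrinsic operand and the emitter call shown
// are hypothetical placeholders):
//
//     HWIntrinsicImmOpHelper helper(this, immOp, node);
//
//     for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
//     {
//         const int imm = helper.ImmValue();
//         // emit the instruction for this particular immediate value, e.g.
//         // GetEmitter()->emitIns_R_R_I(ins, emitSize, targetReg, op1Reg, imm, opt);
//     }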
#endif // TARGET_ARM64
#endif // FEATURE_HW_INTRINSICS
#if !defined(TARGET_64BIT)
// CodeGen for Long Ints
void genStoreLongLclVar(GenTree* treeNode);
#endif // !defined(TARGET_64BIT)
// Do liveness update for register produced by the current node in codegen after
// code has been emitted for it.
void genProduceReg(GenTree* tree);
void genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum);
void genUnspillLocal(
unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse);
void genUnspillRegIfNeeded(GenTree* tree);
void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex);
regNumber genConsumeReg(GenTree* tree);
regNumber genConsumeReg(GenTree* tree, unsigned multiRegIndex);
void genCopyRegIfNeeded(GenTree* tree, regNumber needReg);
void genConsumeRegAndCopy(GenTree* tree, regNumber needReg);
void genConsumeIfReg(GenTree* tree)
{
if (!tree->isContained())
{
(void)genConsumeReg(tree);
}
}
void genRegCopy(GenTree* tree);
regNumber genRegCopy(GenTree* tree, unsigned multiRegIndex);
void genTransferRegGCState(regNumber dst, regNumber src);
void genConsumeAddress(GenTree* addr);
void genConsumeAddrMode(GenTreeAddrMode* mode);
void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg);
void genConsumeBlockSrc(GenTreeBlk* blkNode);
void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg);
void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
#ifdef FEATURE_PUT_STRUCT_ARG_STK
void genConsumePutStructArgStk(GenTreePutArgStk* putArgStkNode,
regNumber dstReg,
regNumber srcReg,
regNumber sizeReg);
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if FEATURE_ARG_SPLIT
void genConsumeArgSplitStruct(GenTreePutArgSplit* putArgNode);
#endif // FEATURE_ARG_SPLIT
void genConsumeRegs(GenTree* tree);
void genConsumeOperands(GenTreeOp* tree);
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void genConsumeMultiOpOperands(GenTreeMultiOp* tree);
#endif
void genEmitGSCookieCheck(bool pushReg);
void genCodeForShift(GenTree* tree);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genCodeForShiftLong(GenTree* tree);
#endif
#ifdef TARGET_XARCH
void genCodeForShiftRMW(GenTreeStoreInd* storeInd);
void genCodeForBT(GenTreeOp* bt);
#endif // TARGET_XARCH
void genCodeForCast(GenTreeOp* tree);
void genCodeForLclAddr(GenTreeLclVarCommon* lclAddrNode);
void genCodeForIndexAddr(GenTreeIndexAddr* tree);
void genCodeForIndir(GenTreeIndir* tree);
void genCodeForNegNot(GenTree* tree);
void genCodeForBswap(GenTree* tree);
void genCodeForLclVar(GenTreeLclVar* tree);
void genCodeForLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclVar(GenTreeLclVar* tree);
void genCodeForReturnTrap(GenTreeOp* tree);
void genCodeForStoreInd(GenTreeStoreInd* tree);
void genCodeForSwap(GenTreeOp* tree);
void genCodeForCpObj(GenTreeObj* cpObjNode);
void genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode);
void genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode);
#ifndef TARGET_X86
void genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode);
#endif
void genCodeForPhysReg(GenTreePhysReg* tree);
void genCodeForNullCheck(GenTreeIndir* tree);
void genCodeForCmpXchg(GenTreeCmpXchg* tree);
void genAlignStackBeforeCall(GenTreePutArgStk* putArgStk);
void genAlignStackBeforeCall(GenTreeCall* call);
void genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias = 0);
#if defined(UNIX_X86_ABI)
unsigned curNestedAlignment; // Keep track of alignment adjustment required during codegen.
unsigned maxNestedAlignment; // The maximum amount of alignment adjustment required.
void SubtractNestedAlignment(unsigned adjustment)
{
assert(curNestedAlignment >= adjustment);
unsigned newNestedAlignment = curNestedAlignment - adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
}
void AddNestedAlignment(unsigned adjustment)
{
unsigned newNestedAlignment = curNestedAlignment + adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
if (curNestedAlignment > maxNestedAlignment)
{
JITDUMP("Max stack nested alignment changed from %d to %d\n", maxNestedAlignment, curNestedAlignment);
maxNestedAlignment = curNestedAlignment;
}
}
#endif
#ifndef TARGET_X86
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum);
#endif // !TARGET_X86
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk);
void genPushReg(var_types type, regNumber srcReg);
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk);
#endif // TARGET_X86
void genPutStructArgStk(GenTreePutArgStk* treeNode);
unsigned genMove8IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove4IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove2IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove1IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
void genStoreRegToStackArg(var_types type, regNumber reg, int offset);
void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode);
void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode);
#ifdef TARGET_X86
void genStructPutArgPush(GenTreePutArgStk* putArgStkNode);
#else
void genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgStkNode);
#endif
#endif // FEATURE_PUT_STRUCT_ARG_STK
void genCodeForStoreBlk(GenTreeBlk* storeBlkNode);
#ifndef TARGET_X86
void genCodeForInitBlkHelper(GenTreeBlk* initBlkNode);
#endif
void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode);
void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode);
void genJumpTable(GenTree* tree);
void genTableBasedSwitch(GenTree* tree);
void genCodeForArrIndex(GenTreeArrIndex* treeNode);
void genCodeForArrOffset(GenTreeArrOffs* treeNode);
#if defined(TARGET_LOONGARCH64)
instruction genGetInsForOper(GenTree* treeNode);
#else
instruction genGetInsForOper(genTreeOps oper, var_types type);
#endif
bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data);
GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd);
regNumber getCallIndirectionCellReg(const GenTreeCall* call);
void genCall(GenTreeCall* call);
void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes));
void genJmpMethod(GenTree* jmp);
BasicBlock* genCallFinally(BasicBlock* block);
void genCodeForJumpTrue(GenTreeOp* jtrue);
#if defined(TARGET_LOONGARCH64)
// TODO: refactor for LA.
void genCodeForJumpCompare(GenTreeOp* tree);
#endif
#if defined(TARGET_ARM64)
void genCodeForJumpCompare(GenTreeOp* tree);
void genCodeForMadd(GenTreeOp* tree);
void genCodeForMsub(GenTreeOp* tree);
void genCodeForBfiz(GenTreeOp* tree);
void genCodeForAddEx(GenTreeOp* tree);
#endif // TARGET_ARM64
#if defined(FEATURE_EH_FUNCLETS)
void genEHCatchRet(BasicBlock* block);
#else // !FEATURE_EH_FUNCLETS
void genEHFinallyOrFilterRet(BasicBlock* block);
#endif // !FEATURE_EH_FUNCLETS
void genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode);
void genMultiRegStoreToLocal(GenTreeLclVar* lclNode);
#if defined(TARGET_LOONGARCH64)
void genMultiRegCallStoreToLocal(GenTree* treeNode);
#endif
// Codegen for multi-register struct returns.
bool isStructReturn(GenTree* treeNode);
#ifdef FEATURE_SIMD
void genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc);
#endif
void genStructReturn(GenTree* treeNode);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genLongReturn(GenTree* treeNode);
#endif // TARGET_X86 || TARGET_ARM
#if defined(TARGET_X86)
void genFloatReturn(GenTree* treeNode);
#endif // TARGET_X86
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
void genSimpleReturn(GenTree* treeNode);
#endif // TARGET_ARM64 || TARGET_LOONGARCH64
void genReturn(GenTree* treeNode);
void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp);
void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp);
target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp);
#if defined(TARGET_XARCH)
void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp);
#endif // defined(TARGET_XARCH)
void genLclHeap(GenTree* tree);
bool genIsRegCandidateLocal(GenTree* tree)
{
if (!tree->IsLocal())
{
return false;
}
return compiler->lvaGetDesc(tree->AsLclVarCommon())->lvIsRegCandidate();
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool m_pushStkArg;
#else // !TARGET_X86
unsigned m_stkArgVarNum;
unsigned m_stkArgOffset;
#endif // !TARGET_X86
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if defined(DEBUG) && defined(TARGET_XARCH)
void genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPointerVar);
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#ifdef DEBUG
GenTree* lastConsumedNode;
void genNumberOperandUse(GenTree* const operand, int& useNum) const;
void genCheckConsumeNode(GenTree* const node);
#else // !DEBUG
inline void genCheckConsumeNode(GenTree* treeNode)
{
}
#endif // DEBUG
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Instruction XX
XX XX
XX The interface to generate a machine-instruction. XX
XX Currently specific to x86 XX
XX TODO-Cleanup: Consider factoring this out of CodeGen XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void instGen(instruction ins);
void inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock);
void inst_SET(emitJumpKind condition, regNumber reg);
void inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size = EA_UNKNOWN);
void inst_Mov(var_types dstType,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_Mov_Extend(var_types srcType,
bool srcInReg,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
var_types type = TYP_I_IMPL,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
regNumber reg3,
emitAttr size,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_IV(instruction ins, cnsval_ssize_t val);
void inst_IV_handle(instruction ins, cnsval_ssize_t val);
void inst_RV_IV(
instruction ins, regNumber reg, target_ssize_t val, emitAttr size, insFlags flags = INS_FLAGS_DONT_CARE);
void inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type);
void inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs);
void inst_TT_RV(instruction ins, emitAttr size, GenTree* tree, regNumber reg);
void inst_RV_SH(instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags = INS_FLAGS_DONT_CARE);
#if defined(TARGET_XARCH)
enum class OperandKind{
ClsVar, // [CLS_VAR_ADDR] - "C" in the emitter.
Local, // [Local or spill temp + offset] - "S" in the emitter.
Indir, // [base+index*scale+disp] - "A" in the emitter.
Imm, // immediate - "I" in the emitter.
Reg // reg - "R" in the emitter.
};
class OperandDesc
{
OperandKind m_kind;
union {
struct
{
CORINFO_FIELD_HANDLE m_fieldHnd;
};
struct
{
int m_varNum;
uint16_t m_offset;
};
struct
{
GenTree* m_addr;
GenTreeIndir* m_indir;
var_types m_indirType;
};
struct
{
ssize_t m_immediate;
bool m_immediateNeedsReloc;
};
struct
{
regNumber m_reg;
};
};
public:
OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) : m_kind(OperandKind::ClsVar), m_fieldHnd(fieldHnd)
{
}
OperandDesc(int varNum, uint16_t offset) : m_kind(OperandKind::Local), m_varNum(varNum), m_offset(offset)
{
}
OperandDesc(GenTreeIndir* indir)
: m_kind(OperandKind::Indir), m_addr(indir->Addr()), m_indir(indir), m_indirType(indir->TypeGet())
{
}
OperandDesc(var_types indirType, GenTree* addr)
: m_kind(OperandKind::Indir), m_addr(addr), m_indir(nullptr), m_indirType(indirType)
{
}
OperandDesc(ssize_t immediate, bool immediateNeedsReloc)
: m_kind(OperandKind::Imm), m_immediate(immediate), m_immediateNeedsReloc(immediateNeedsReloc)
{
}
OperandDesc(regNumber reg) : m_kind(OperandKind::Reg), m_reg(reg)
{
}
OperandKind GetKind() const
{
return m_kind;
}
CORINFO_FIELD_HANDLE GetFieldHnd() const
{
assert(m_kind == OperandKind::ClsVar);
return m_fieldHnd;
}
int GetVarNum() const
{
assert(m_kind == OperandKind::Local);
return m_varNum;
}
int GetLclOffset() const
{
assert(m_kind == OperandKind::Local);
return m_offset;
}
// TODO-Cleanup: instead of this rather unsightly workaround with
// "indirForm", create a new abstraction for address modes to pass
// to the emitter (or at least just use "addr"...).
GenTreeIndir* GetIndirForm(GenTreeIndir* pIndirForm)
{
if (m_indir == nullptr)
{
GenTreeIndir indirForm = CodeGen::indirForm(m_indirType, m_addr);
memcpy(pIndirForm, &indirForm, sizeof(GenTreeIndir));
}
else
{
pIndirForm = m_indir;
}
return pIndirForm;
}
ssize_t GetImmediate() const
{
assert(m_kind == OperandKind::Imm);
return m_immediate;
}
emitAttr GetEmitAttrForImmediate(emitAttr baseAttr) const
{
assert(m_kind == OperandKind::Imm);
return m_immediateNeedsReloc ? EA_SET_FLG(baseAttr, EA_CNS_RELOC_FLG) : baseAttr;
}
regNumber GetReg() const
{
return m_reg;
}
bool IsContained() const
{
return m_kind != OperandKind::Reg;
}
};
OperandDesc genOperandDesc(GenTree* op);
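// Illustrative sketch of how the inst_* wrappers below are expected to consume an OperandDesc
// (the emitter calls shown are simplified placeholders):
//
//     OperandDesc opDesc = genOperandDesc(op);
//     switch (opDesc.GetKind())
//     {
//         case OperandKind::Reg:
//             GetEmitter()->emitIns_R_R(ins, size, reg, opDesc.GetReg());
//             break;
//         case OperandKind::Imm:
//             GetEmitter()->emitIns_R_I(ins, opDesc.GetEmitAttrForImmediate(size), reg, opDesc.GetImmediate());
//             break;
//         case OperandKind::Local:
//             GetEmitter()->emitIns_R_S(ins, size, reg, opDesc.GetVarNum(), opDesc.GetLclOffset());
//             break;
//         // ClsVar and Indir are handled similarly via emitIns_R_C / emitIns_R_A.
//     }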
void inst_TT(instruction ins, emitAttr size, GenTree* op1);
void inst_RV_TT(instruction ins, emitAttr size, regNumber op1Reg, GenTree* op2);
void inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival);
void inst_RV_TT_IV(instruction ins, emitAttr attr, regNumber reg1, GenTree* rmOp, int ival);
void inst_RV_RV_TT(instruction ins, emitAttr size, regNumber targetReg, regNumber op1Reg, GenTree* op2, bool isRMW);
#endif
void inst_set_SV_var(GenTree* tree);
#ifdef TARGET_ARM
bool arm_Valid_Imm_For_Instr(instruction ins, target_ssize_t imm, insFlags flags);
bool arm_Valid_Imm_For_Add(target_ssize_t imm, insFlags flag);
bool arm_Valid_Imm_For_Add_SP(target_ssize_t imm);
#endif
instruction ins_Move_Extend(var_types srcType, bool srcInReg);
instruction ins_Copy(var_types dstType);
instruction ins_Copy(regNumber srcReg, var_types dstType);
instruction ins_FloatConv(var_types to, var_types from);
instruction ins_MathOp(genTreeOps oper, var_types type);
void instGen_Return(unsigned stkArgSize);
enum BarrierKind
{
BARRIER_FULL, // full barrier
BARRIER_LOAD_ONLY, // load barrier
};
void instGen_MemoryBarrier(BarrierKind barrierKind = BARRIER_FULL);
void instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags = INS_FLAGS_DONT_CARE);
void instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags = INS_FLAGS_DONT_CARE DEBUGARG(size_t targetHandle = 0)
DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY));
#ifdef TARGET_XARCH
instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue);
#endif // TARGET_XARCH
#ifndef TARGET_LOONGARCH64
// Maps a GenCondition code to a sequence of conditional jumps or other conditional instructions
// such as X86's SETcc. A sequence of instructions rather than just a single one is required for
// certain floating point conditions.
// For example, X86's UCOMISS sets ZF to indicate equality but it also sets it, together with PF,
// to indicate an unordered result. So for GenCondition::FEQ we first need to check if PF is 0
// and then jump if ZF is 1:
// JP fallThroughBlock
// JE jumpDestBlock
// fallThroughBlock:
// ...
// jumpDestBlock:
//
// This is very similar to the way shortcircuit evaluation of bool AND and OR operators works so
// in order to make the GenConditionDesc mapping tables easier to read, a bool expression-like
// pattern is used to encode the above:
// { EJ_jnp, GT_AND, EJ_je }
// { EJ_jp, GT_OR, EJ_jne }
//
// For more details check inst_JCC and inst_SETCC functions.
//
struct GenConditionDesc
{
emitJumpKind jumpKind1;
genTreeOps oper;
emitJumpKind jumpKind2;
char padTo4Bytes;
static const GenConditionDesc& Get(GenCondition condition)
{
assert(condition.GetCode() < ArrLen(map));
const GenConditionDesc& desc = map[condition.GetCode()];
assert(desc.jumpKind1 != EJ_NONE);
assert((desc.oper == GT_NONE) || (desc.oper == GT_AND) || (desc.oper == GT_OR));
assert((desc.oper == GT_NONE) == (desc.jumpKind2 == EJ_NONE));
return desc;
}
private:
static const GenConditionDesc map[32];
};
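// Illustrative sketch of how inst_JCC (declared below) is expected to expand a descriptor; the
// reverse-jump-kind helper shown is an assumption about the available emitter utilities:
//
//     const GenConditionDesc& desc = GenConditionDesc::Get(condition);
//     if (desc.oper == GT_NONE)
//     {
//         inst_JMP(desc.jumpKind1, target);
//     }
//     else if (desc.oper == GT_OR)
//     {
//         inst_JMP(desc.jumpKind1, target);
//         inst_JMP(desc.jumpKind2, target);
//     }
//     else // GT_AND: skip the second jump when the reversed first condition holds
//     {
//         BasicBlock* skipLabel = genCreateTempLabel();
//         inst_JMP(emitter::emitReverseJumpKind(desc.jumpKind1), skipLabel);
//         inst_JMP(desc.jumpKind2, target);
//         genDefineTempLabel(skipLabel);
//     }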
void inst_JCC(GenCondition condition, BasicBlock* target);
void inst_SETCC(GenCondition condition, var_types type, regNumber dstReg);
void genCodeForJcc(GenTreeCC* tree);
void genCodeForSetcc(GenTreeCC* setcc);
#endif // !TARGET_LOONGARCH64
};
// A simple phase that just invokes a method on the codegen instance
//
class CodeGenPhase final : public Phase
{
public:
CodeGenPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
: Phase(_codeGen->GetCompiler(), _phase), codeGen(_codeGen), action(_action)
{
}
protected:
virtual PhaseStatus DoPhase() override
{
(codeGen->*action)();
return PhaseStatus::MODIFIED_EVERYTHING;
}
private:
CodeGen* codeGen;
void (CodeGen::*action)();
};
// Wrapper for using CodeGenPhase
//
inline void DoPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
{
CodeGenPhase phase(_codeGen, _phase, _action);
phase.Run();
}
#endif // _CODEGEN_H_
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//
// This class contains all the data & functionality for code generation
// of a method, except for the target-specific elements, which are
// primarily in the Target class.
//
#ifndef _CODEGEN_H_
#define _CODEGEN_H_
#include "codegeninterface.h"
#include "compiler.h" // temporary??
#include "regset.h"
#include "jitgcinfo.h"
class CodeGen final : public CodeGenInterface
{
friend class emitter;
friend class DisAssembler;
public:
// This could use further abstraction
CodeGen(Compiler* theCompiler);
virtual void genGenerateCode(void** codePtr, uint32_t* nativeSizeOfCode);
void genGenerateMachineCode();
void genEmitMachineCode();
void genEmitUnwindDebugGCandEH();
// TODO-Cleanup: Abstract out the part of this that finds the addressing mode, and
// move it to Lower
virtual bool genCreateAddrMode(
GenTree* addr, bool fold, bool* revPtr, GenTree** rv1Ptr, GenTree** rv2Ptr, unsigned* mulPtr, ssize_t* cnsPtr);
private:
#if defined(TARGET_XARCH)
// Bit masks used in negating a float or double number.
// This is to avoid creating more than one data constant for these bitmasks when a
// method has more than one GT_NEG operation on floating point values.
CORINFO_FIELD_HANDLE negBitmaskFlt;
CORINFO_FIELD_HANDLE negBitmaskDbl;
// Bit masks used in computing Math.Abs() of a float or double number.
CORINFO_FIELD_HANDLE absBitmaskFlt;
CORINFO_FIELD_HANDLE absBitmaskDbl;
// Bit mask used in U8 -> double conversion to adjust the result.
CORINFO_FIELD_HANDLE u8ToDblBitmask;
// Generates SSE2 code for the given tree as "Operand BitWiseOp BitMask"
void genSSE2BitwiseOp(GenTree* treeNode);
// Generates SSE41 code for the given tree as a round operation
void genSSE41RoundOp(GenTreeOp* treeNode);
instruction simdAlignedMovIns()
{
// We use movaps for the non-VEX encoding because it is a smaller instruction;
// with VEX encoding, vmovaps is the same size as vmovdqa, and vmovdqa has more
// available CPU ports on older processors, so we switch to that.
return compiler->canUseVexEncoding() ? INS_movdqa : INS_movaps;
}
instruction simdUnalignedMovIns()
{
// We use movups for the non-VEX encoding because it is a smaller instruction;
// with VEX encoding, vmovups is the same size as vmovdqu, and vmovdqu has more
// available CPU ports on older processors, so we switch to that.
return compiler->canUseVexEncoding() ? INS_movdqu : INS_movups;
}
#endif // defined(TARGET_XARCH)
void genPrepForCompiler();
void genMarkLabelsForCodegen();
inline RegState* regStateForType(var_types t)
{
return varTypeUsesFloatReg(t) ? &floatRegState : &intRegState;
}
inline RegState* regStateForReg(regNumber reg)
{
return genIsValidFloatReg(reg) ? &floatRegState : &intRegState;
}
regNumber genFramePointerReg()
{
if (isFramePointerUsed())
{
return REG_FPBASE;
}
else
{
return REG_SPBASE;
}
}
static bool genShouldRoundFP();
static GenTreeIndir indirForm(var_types type, GenTree* base);
static GenTreeStoreInd storeIndirForm(var_types type, GenTree* base, GenTree* data);
GenTreeIntCon intForm(var_types type, ssize_t value);
void genRangeCheck(GenTree* node);
void genLockedInstructions(GenTreeOp* node);
#ifdef TARGET_XARCH
void genCodeForLockAdd(GenTreeOp* node);
#endif
#ifdef REG_OPT_RSVD
// On some targets such as the ARM we may need to have an extra reserved register
// that is used when addressing stack based locals and stack based temps.
// This method returns the regNumber that should be used when an extra register
// is needed to access the stack based locals and stack based temps.
//
regNumber rsGetRsvdReg()
{
// We should have already added this register to the mask
// of reserved registers in regSet.rdMaskResvd
noway_assert((regSet.rsMaskResvd & RBM_OPT_RSVD) != 0);
return REG_OPT_RSVD;
}
#endif // REG_OPT_RSVD
//-------------------------------------------------------------------------
bool genUseBlockInit; // true if we plan to block-initialize the local stack frame
unsigned genInitStkLclCnt; // The count of local variables that we need to zero init
void SubtractStackLevel(unsigned adjustment)
{
assert(genStackLevel >= adjustment);
unsigned newStackLevel = genStackLevel - adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void AddStackLevel(unsigned adjustment)
{
unsigned newStackLevel = genStackLevel + adjustment;
if (genStackLevel != newStackLevel)
{
JITDUMP("Adjusting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
void SetStackLevel(unsigned newStackLevel)
{
if (genStackLevel != newStackLevel)
{
JITDUMP("Setting stack level from %d to %d\n", genStackLevel, newStackLevel);
}
genStackLevel = newStackLevel;
}
//-------------------------------------------------------------------------
void genReportEH();
// Allocates storage for the GC info, writes the GC info into that storage, records the address of the
// GC info of the method with the EE, and returns a pointer to the "info" portion (just post-header) of
// the GC info. Requires "codeSize" to be the size of the generated code, "prologSize" and "epilogSize"
// to be the sizes of the prolog and epilog, respectively. In DEBUG, makes a check involving the
// "codePtr", assumed to be a pointer to the start of the generated code.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef JIT32_GCENCODER
void* genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void* genCreateAndStoreGCInfoJIT32(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr));
#else // !JIT32_GCENCODER
void genCreateAndStoreGCInfo(unsigned codeSize, unsigned prologSize, unsigned epilogSize DEBUGARG(void* codePtr));
void genCreateAndStoreGCInfoX64(unsigned codeSize, unsigned prologSize DEBUGARG(void* codePtr));
#endif // !JIT32_GCENCODER
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
// the current (pending) label ref, a label which has been referenced but not yet seen
BasicBlock* genPendingCallLabel;
void** codePtr;
uint32_t* nativeSizeOfCode;
unsigned codeSize;
void* coldCodePtr;
void* consPtr;
#ifdef DEBUG
// Last instr we have displayed for dspInstrs
unsigned genCurDispOffset;
static const char* genInsName(instruction ins);
const char* genInsDisplayName(emitter::instrDesc* id);
static const char* genSizeStr(emitAttr size);
#endif // DEBUG
void genInitialize();
void genInitializeRegisterState();
void genCodeForBBlist();
public:
void genSpillVar(GenTree* tree);
protected:
void genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTarget = REG_NA);
void genGCWriteBarrier(GenTree* tgt, GCInfo::WriteBarrierForm wbf);
BasicBlock* genCreateTempLabel();
private:
void genLogLabel(BasicBlock* bb);
protected:
void genDefineTempLabel(BasicBlock* label);
void genDefineInlineTempLabel(BasicBlock* label);
void genAdjustStackLevel(BasicBlock* block);
void genExitCode(BasicBlock* block);
void genJumpToThrowHlpBlk(emitJumpKind jumpKind, SpecialCodeKind codeKind, BasicBlock* failBlk = nullptr);
#ifdef TARGET_LOONGARCH64
void genSetRegToIcon(regNumber reg, ssize_t val, var_types type);
void genJumpToThrowHlpBlk_la(SpecialCodeKind codeKind,
instruction ins,
regNumber reg1,
BasicBlock* failBlk = nullptr,
regNumber reg2 = REG_R0);
#else
void genCheckOverflow(GenTree* tree);
#endif
//-------------------------------------------------------------------------
//
// Prolog/epilog generation
//
//-------------------------------------------------------------------------
unsigned prologSize;
unsigned epilogSize;
//
// Prolog functions and data (there are a few exceptions for more generally used things)
//
void genEstablishFramePointer(int delta, bool reportUnwindData);
#if defined(TARGET_LOONGARCH64)
void genFnPrologCalleeRegArgs();
#else
void genFnPrologCalleeRegArgs(regNumber xtraReg, bool* pXtraRegClobbered, RegState* regState);
#endif
void genEnregisterIncomingStackArgs();
#if defined(TARGET_ARM64)
void genEnregisterOSRArgsAndLocals(regNumber initReg, bool* pInitRegZeroed);
#else
void genEnregisterOSRArgsAndLocals();
#endif
void genCheckUseBlockInit();
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_SIMD)
void genClearStackVec3ArgUpperBits();
#endif // UNIX_AMD64_ABI && FEATURE_SIMD
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
bool genInstrWithConstant(instruction ins,
emitAttr attr,
regNumber reg1,
regNumber reg2,
ssize_t imm,
regNumber tmpReg,
bool inUnwindRegion = false);
void genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg, bool* pTmpRegIsZero, bool reportUnwindData);
void genPrologSaveRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
void genEpilogRestoreRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero);
void genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero);
// A simple struct to keep register pairs for prolog and epilog.
struct RegPair
{
regNumber reg1;
regNumber reg2;
bool useSaveNextPair;
RegPair(regNumber reg1) : reg1(reg1), reg2(REG_NA), useSaveNextPair(false)
{
}
RegPair(regNumber reg1, regNumber reg2) : reg1(reg1), reg2(reg2), useSaveNextPair(false)
{
assert(reg2 == REG_NEXT(reg1));
}
};
static void genBuildRegPairsStack(regMaskTP regsMask, ArrayStack<RegPair>* regStack);
static void genSetUseSaveNextPairs(ArrayStack<RegPair>* regStack);
static int genGetSlotSizeForRegsInMask(regMaskTP regsMask);
void genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genRestoreCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset);
void genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta);
void genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta);
void genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed);
#else
void genPushCalleeSavedRegisters();
#endif
#if defined(TARGET_AMD64)
void genOSRRecordTier0CalleeSavedRegistersAndFrame();
void genOSRSaveRemainingCalleeSavedRegisters();
#endif // TARGET_AMD64
void genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn);
void genPoisonFrame(regMaskTP bbRegLiveIn);
#if defined(TARGET_ARM)
bool genInstrWithConstant(
instruction ins, emitAttr attr, regNumber reg1, regNumber reg2, ssize_t imm, insFlags flags, regNumber tmpReg);
bool genStackPointerAdjustment(ssize_t spAdjustment, regNumber tmpReg);
void genPushFltRegs(regMaskTP regMask);
void genPopFltRegs(regMaskTP regMask);
regMaskTP genStackAllocRegisterMask(unsigned frameSize, regMaskTP maskCalleeSavedFloat);
regMaskTP genJmpCallArgMask();
void genFreeLclFrame(unsigned frameSize,
/* IN OUT */ bool* pUnwindStarted);
void genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg);
void genMov32RelocatableDataLabel(unsigned value, regNumber reg);
void genMov32RelocatableImmediate(emitAttr size, BYTE* addr, regNumber reg);
bool genUsedPopToReturn; // True if we use the pop into PC to return,
// False if we didn't and must branch to LR to return.
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of registers saved in the funclet prolog (includes LR)
unsigned fiFunctionCallerSPtoFPdelta; // Delta between caller SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
unsigned fiPSP_slot_SP_offset; // PSP slot offset from SP
int fiPSP_slot_CallerSP_offset; // PSP slot offset from Caller SP
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_ARM64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes LR)
int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function
// (negative)
int fiSP_to_FPLR_save_delta; // FP/LR register save offset from SP (positive)
int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
int fiSP_to_CalleeSave_delta; // First callee-saved register slot offset from SP (positive)
int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
int fiSpDelta1; // Stack pointer delta 1 (negative)
int fiSpDelta2; // Stack pointer delta 2 (negative)
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_AMD64)
// A set of information that is used by funclet prolog and epilog generation. It is collected once, before
// funclet prologs and epilogs are generated, and used by all funclet prologs and epilogs, which must all be the
// same.
struct FuncletFrameInfoDsc
{
unsigned fiFunction_InitialSP_to_FP_delta; // Delta between Initial-SP and the frame pointer
unsigned fiSpDelta; // Stack pointer delta
int fiPSP_slot_InitialSP_offset; // PSP slot offset from Initial-SP
};
FuncletFrameInfoDsc genFuncletInfo;
#elif defined(TARGET_LOONGARCH64)
// A set of information that is used by funclet prolog and epilog generation.
// It is collected once, before funclet prologs and epilogs are generated,
// and used by all funclet prologs and epilogs, which must all be the same.
struct FuncletFrameInfoDsc
{
regMaskTP fiSaveRegs; // Set of callee-saved registers saved in the funclet prolog (includes RA)
int fiFunction_CallerSP_to_FP_delta; // Delta between caller SP and the frame pointer in the parent function
// (negative)
int fiSP_to_FPRA_save_delta; // FP/RA register save offset from SP (positive)
int fiSP_to_PSP_slot_delta; // PSP slot offset from SP (positive)
int fiCallerSP_to_PSP_slot_delta; // PSP slot offset from Caller SP (negative)
int fiFrameType; // Funclet frame types are numbered. See genFuncletProlog() for details.
int fiSpDelta1; // Stack pointer delta 1 (negative)
};
FuncletFrameInfoDsc genFuncletInfo;
#endif // TARGET_LOONGARCH64
#if defined(TARGET_XARCH)
// Save/Restore callee saved float regs to stack
void genPreserveCalleeSavedFltRegs(unsigned lclFrameSize);
void genRestoreCalleeSavedFltRegs(unsigned lclFrameSize);
// Generate VZeroupper instruction to avoid AVX/SSE transition penalty
void genVzeroupperIfNeeded(bool check256bitOnly = true);
#endif // TARGET_XARCH
void genZeroInitFltRegs(const regMaskTP& initFltRegs, const regMaskTP& initDblRegs, const regNumber& initReg);
regNumber genGetZeroReg(regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrame(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed);
void genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed);
void genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed);
void genFinalizeFrame();
#ifdef PROFILING_SUPPORTED
void genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed);
void genProfilingLeaveCallback(unsigned helper);
#endif // PROFILING_SUPPORTED
// clang-format off
void genEmitCall(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
void* addr
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
regNumber base,
bool isJump);
// clang-format on
// clang-format off
void genEmitCallIndir(int callType,
CORINFO_METHOD_HANDLE methHnd,
INDEBUG_LDISASM_COMMA(CORINFO_SIG_INFO* sigInfo)
GenTreeIndir* indir
X86_ARG(int argSize),
emitAttr retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(emitAttr secondRetSize),
const DebugInfo& di,
bool isJump);
// clang-format on
//
// Epilog functions
//
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
bool genCanUsePopToReturn(regMaskTP maskPopRegsInt, bool jmpEpilog);
#endif
#if defined(TARGET_ARM64)
void genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog);
#else // !defined(TARGET_ARM64)
void genPopCalleeSavedRegisters(bool jmpEpilog = false);
#if defined(TARGET_XARCH)
unsigned genPopCalleeSavedRegistersFromMask(regMaskTP rsPopRegs);
#endif // defined(TARGET_XARCH)
#endif // !defined(TARGET_ARM64)
//
// Common or driving functions
//
void genReserveProlog(BasicBlock* block); // currently unused
void genReserveEpilog(BasicBlock* block);
void genFnProlog();
void genFnEpilog(BasicBlock* block);
#if defined(FEATURE_EH_FUNCLETS)
void genReserveFuncletProlog(BasicBlock* block);
void genReserveFuncletEpilog(BasicBlock* block);
void genFuncletProlog(BasicBlock* block);
void genFuncletEpilog();
void genCaptureFuncletPrologEpilogInfo();
/*-----------------------------------------------------------------------------
*
* Set the main function PSPSym value in the frame.
* Funclets use different code to load the PSP sym and save it in their frame.
* See the document "CLR ABI.md" for a full description of the PSPSym.
* The PSPSym section of that document is copied here.
*
***********************************
* The name PSPSym stands for Previous Stack Pointer Symbol. It is how a funclet
* accesses locals from the main function body.
*
* First, two definitions.
*
* Caller-SP is the value of the stack pointer in a function's caller before the call
* instruction is executed. That is, when function A calls function B, Caller-SP for B
* is the value of the stack pointer immediately before the call instruction in A
* (calling B) was executed. Note that this definition holds for both AMD64, which
* pushes the return address when a call instruction is executed, and for ARM, which
* doesn't. For AMD64, Caller-SP is the address above the call return address.
*
* Initial-SP is the initial value of the stack pointer after the fixed-size portion of
* the frame has been allocated. That is, before any "alloca"-type allocations.
*
* The PSPSym is a pointer-sized local variable in the frame of the main function and
* of each funclet. The value stored in PSPSym is the value of Initial-SP/Caller-SP
* for the main function. The stack offset of the PSPSym is reported to the VM in the
* GC information header. The value reported in the GC information is the offset of the
* PSPSym from Initial-SP/Caller-SP. (Note that both the value stored, and the way the
* value is reported to the VM, differs between architectures. In particular, note that
* most things in the GC information header are reported as offsets relative to Caller-SP,
* but PSPSym on AMD64 is one (maybe the only) exception.)
*
* The VM uses the PSPSym to find other locals it cares about (such as the generics context
* in a funclet frame). The JIT uses it to re-establish the frame pointer register, so that
* the frame pointer is the same value in a funclet as it is in the main function body.
*
* When a funclet is called, it is passed the Establisher Frame Pointer. For AMD64 this is
* true for all funclets and it is passed as the first argument in RCX, but for ARM this is
* only true for first pass funclets (currently just filters) and it is passed as the second
* argument in R1. The Establisher Frame Pointer is a stack pointer of an interesting "parent"
* frame in the exception processing system. For the CLR, it points either to the main function
* frame or a dynamically enclosing funclet frame from the same function, for the funclet being
* invoked. The value of the Establisher Frame Pointer is Initial-SP on AMD64, Caller-SP on ARM.
*
* Using the establisher frame, the funclet wants to load the value of the PSPSym. Since we
* don't know if the Establisher Frame is from the main function or a funclet, we design the
* main function and funclet frame layouts to place the PSPSym at an identical, small, constant
* offset from the Establisher Frame in each case. (This is also required because we only report
* a single offset to the PSPSym in the GC information, and that offset must be valid for the main
* function and all of its funclets). Then, the funclet uses this known offset to compute the
* PSPSym address and read its value. From this, it can compute the value of the frame pointer
* (which is a constant offset from the PSPSym value) and set the frame register to be the same
* as the parent function. Also, the funclet writes the value of the PSPSym to its own frame's
* PSPSym. This "copying" of the PSPSym happens for every funclet invocation, in particular,
* for every nested funclet invocation.
*
* On ARM, for all second pass funclets (finally, fault, catch, and filter-handler) the VM
* restores all non-volatile registers to their values within the parent frame. This includes
* the frame register (R11). Thus, the PSPSym is not used to recompute the frame pointer register
* in this case, though the PSPSym is copied to the funclet's frame, as for all funclets.
*
* Catch, Filter, and Filter-handlers also get an Exception object (GC ref) as an argument
* (REG_EXCEPTION_OBJECT). On AMD64 it is the second argument and thus passed in RDX. On
* ARM this is the first argument and passed in R0.
*
* (Note that the JIT64 source code contains a comment that says, "The current CLR doesn't always
* pass the correct establisher frame to the funclet. Funclet may receive establisher frame of
* funclet when expecting that of original routine." It indicates this is the reason that a PSPSym
* is required in all funclets as well as the main function, whereas if the establisher frame was
* correctly reported, the PSPSym could be omitted in some cases.)
***********************************
*/
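// A minimal illustrative sketch of how a funclet can use the PSPSym, per the description above.
// This is not the code genFuncletProlog actually emits; the register and offset names below
// (estab, tmp, pspSymOffset, pspToFpDelta) are assumptions made only for the example:
//
//     ldr tmp, [estab, #pspSymOffset] ; read the main function's saved SP value via the Establisher Frame
//     str tmp, [sp, #pspSymOffset]    ; copy the PSPSym into this funclet's own frame
//     add fp, tmp, #pspToFpDelta      ; re-derive the parent frame pointer (a constant offset from the PSPSym value)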
void genSetPSPSym(regNumber initReg, bool* pInitRegZeroed);
void genUpdateCurrentFunclet(BasicBlock* block);
#if defined(TARGET_ARM)
void genInsertNopForUnwinder(BasicBlock* block);
#endif
#else // !FEATURE_EH_FUNCLETS
// This is a no-op when there are no funclets!
void genUpdateCurrentFunclet(BasicBlock* block)
{
return;
}
#endif // !FEATURE_EH_FUNCLETS
void genGeneratePrologsAndEpilogs();
#if defined(DEBUG) && defined(TARGET_ARM64)
void genArm64EmitterUnitTests();
#endif
#if defined(DEBUG) && defined(TARGET_LOONGARCH64)
void genLoongArch64EmitterUnitTests();
#endif
#if defined(DEBUG) && defined(LATE_DISASM) && defined(TARGET_AMD64)
void genAmd64EmitterUnitTests();
#endif
#ifdef TARGET_ARM64
virtual void SetSaveFpLrWithAllCalleeSavedRegisters(bool value);
virtual bool IsSaveFpLrWithAllCalleeSavedRegisters() const;
bool genSaveFpLrWithAllCalleeSavedRegisters;
#endif // TARGET_ARM64
//-------------------------------------------------------------------------
//
// End prolog/epilog generation
//
//-------------------------------------------------------------------------
void genSinglePush();
void genSinglePop();
regMaskTP genPushRegs(regMaskTP regs, regMaskTP* byrefRegs, regMaskTP* noRefRegs);
void genPopRegs(regMaskTP regs, regMaskTP byrefRegs, regMaskTP noRefRegs);
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Debugging Support XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#ifdef DEBUG
void genIPmappingDisp(unsigned mappingNum, IPmappingDsc* ipMapping);
void genIPmappingListDisp();
#endif // DEBUG
void genIPmappingAdd(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingAddToFront(IPmappingDscKind kind, const DebugInfo& di, bool isLabel);
void genIPmappingGen();
#ifdef DEBUG
void genDumpPreciseDebugInfo();
void genDumpPreciseDebugInfoInlineTree(FILE* file, InlineContext* context, bool* first);
void genAddPreciseIPMappingHere(const DebugInfo& di);
#endif
void genEnsureCodeEmitted(const DebugInfo& di);
//-------------------------------------------------------------------------
// scope info for the variables
void genSetScopeInfo(unsigned which,
UNATIVE_OFFSET startOffs,
UNATIVE_OFFSET length,
unsigned varNum,
unsigned LVnum,
bool avail,
siVarLoc* varLoc);
void genSetScopeInfo();
#ifdef USING_VARIABLE_LIVE_RANGE
// Send VariableLiveRanges as debug info to the debugger
void genSetScopeInfoUsingVariableRanges();
#endif // USING_VARIABLE_LIVE_RANGE
#ifdef USING_SCOPE_INFO
void genSetScopeInfoUsingsiScope();
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ScopeInfo XX
XX XX
XX Keeps track of the scopes during code-generation. XX
XX This is used to translate the local-variable debugging information XX
XX from IL offsets to native code offsets. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
/*****************************************************************************
* ScopeInfo
*
* This class is called during code gen at block-boundaries, and when the
* set of live variables changes. It keeps track of the scope of the variables
* in terms of the native code PC.
*/
#endif // USING_SCOPE_INFO
public:
void siInit();
void checkICodeDebugInfo();
// The logic used to report debug info on debug code is the same for ScopeInfo and
// VariableLiveRange
void siBeginBlock(BasicBlock* block);
void siEndBlock(BasicBlock* block);
// VariableLiveRange and siScope needs this method to report variables on debug code
void siOpenScopesForNonTrackedVars(const BasicBlock* block, unsigned int lastBlockILEndOffset);
protected:
#if defined(FEATURE_EH_FUNCLETS)
bool siInFuncletRegion; // Have we seen the start of the funclet region?
#endif // FEATURE_EH_FUNCLETS
IL_OFFSET siLastEndOffs; // IL offset of the (exclusive) end of the last block processed
#ifdef USING_SCOPE_INFO
public:
// Closes the "ScopeInfo" of the tracked variables that has become dead.
virtual void siUpdate();
void siCheckVarScope(unsigned varNum, IL_OFFSET offs);
void siCloseAllOpenScopes();
#ifdef DEBUG
void siDispOpenScopes();
#endif
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct siScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scVarNum; // index into lvaTable
unsigned scLVnum; // 'which' in eeGetLVinfo()
unsigned scStackLevel; // Only for stk-vars
siScope* scPrev;
siScope* scNext;
};
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// varDsc and scope description.
CodeGenInterface::siVarLoc getSiVarLoc(const LclVarDsc* varDsc, const siScope* scope) const;
siScope siOpenScopeList, siScopeList, *siOpenScopeLast, *siScopeLast;
unsigned siScopeCnt;
VARSET_TP siLastLife; // Life at last call to siUpdate()
// Tracks the last entry for each tracked register variable
siScope** siLatestTrackedScopes;
// Functions
siScope* siNewScope(unsigned LVnum, unsigned varNum);
void siRemoveFromOpenScopeList(siScope* scope);
void siEndTrackedScope(unsigned varIndex);
void siEndScope(unsigned varNum);
void siEndScope(siScope* scope);
#ifdef DEBUG
bool siVerifyLocalVarTab();
#endif
#ifdef LATE_DISASM
public:
/* virtual */
const char* siRegVarName(size_t offs, size_t size, unsigned reg);
/* virtual */
const char* siStackVarName(size_t offs, size_t size, unsigned reg, unsigned stkOffs);
#endif // LATE_DISASM
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX PrologScopeInfo XX
XX XX
XX We need special handling in the prolog block, as the parameter variables XX
XX may not be in the same position described by genLclVarTable - they all XX
XX start out on the stack XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#endif // USING_SCOPE_INFO
public:
void psiBegProlog();
void psiEndProlog();
#ifdef USING_SCOPE_INFO
void psiAdjustStackLevel(unsigned size);
// For EBP-frames, the parameters are accessed via ESP on entry to the function,
// but via EBP right after a "mov ebp,esp" instruction.
void psiMoveESPtoEBP();
// Close previous psiScope and open a new one on the location described by the registers.
void psiMoveToReg(unsigned varNum, regNumber reg = REG_NA, regNumber otherReg = REG_NA);
// Search the open "psiScope" of the "varNum" parameter, close it and open
// a new one using "LclVarDsc" fields.
void psiMoveToStack(unsigned varNum);
/**************************************************************************
* PROTECTED
*************************************************************************/
protected:
struct psiScope
{
emitLocation scStartLoc; // emitter location of start of scope
emitLocation scEndLoc; // emitter location of end of scope
unsigned scSlotNum; // index into lclVarTab
unsigned scLVnum; // 'which' in eeGetLVinfo()
bool scRegister;
union {
struct
{
regNumberSmall scRegNum;
// Used for:
// - "other half" of long var on architectures with 32 bit size registers - x86.
// - for System V structs it stores the second register
// used to pass a register passed struct.
regNumberSmall scOtherReg;
} u1;
struct
{
regNumberSmall scBaseReg;
NATIVE_OFFSET scOffset;
} u2;
};
psiScope* scPrev;
psiScope* scNext;
// Returns a "siVarLoc" instance representing the place where the variable lives base on
// psiScope properties.
CodeGenInterface::siVarLoc getSiVarLoc() const;
};
psiScope psiOpenScopeList, psiScopeList, *psiOpenScopeLast, *psiScopeLast;
unsigned psiScopeCnt;
// Implementation Functions
psiScope* psiNewPrologScope(unsigned LVnum, unsigned slotNum);
void psiEndPrologScope(psiScope* scope);
void psiSetScopeOffset(psiScope* newScope, const LclVarDsc* lclVarDsc) const;
#endif // USING_SCOPE_INFO
NATIVE_OFFSET psiGetVarStackOffset(const LclVarDsc* lclVarDsc) const;
/*****************************************************************************
* TrnslLocalVarInfo
*
* This struct holds the LocalVarInfo in terms of the generated native code
* after a call to genSetScopeInfo()
*/
protected:
#ifdef DEBUG
struct TrnslLocalVarInfo
{
unsigned tlviVarNum;
unsigned tlviLVnum;
VarName tlviName;
UNATIVE_OFFSET tlviStartPC;
size_t tlviLength;
bool tlviAvailable;
siVarLoc tlviVarLoc;
};
// Array of scopes of LocalVars in terms of native code
TrnslLocalVarInfo* genTrnslLocalVarInfo;
unsigned genTrnslLocalVarCount;
#endif
void genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree);
void genCodeForTreeNode(GenTree* treeNode);
void genCodeForBinary(GenTreeOp* treeNode);
#if defined(TARGET_X86)
void genCodeForLongUMod(GenTreeOp* node);
#endif // TARGET_X86
void genCodeForDivMod(GenTreeOp* treeNode);
void genCodeForMul(GenTreeOp* treeNode);
void genCodeForIncSaturate(GenTree* treeNode);
void genCodeForMulHi(GenTreeOp* treeNode);
void genLeaInstruction(GenTreeAddrMode* lea);
void genSetRegToCond(regNumber dstReg, GenTree* tree);
#if defined(TARGET_ARMARCH)
void genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale);
void genCodeForMulLong(GenTreeOp* mul);
#endif // TARGET_ARMARCH
#if !defined(TARGET_64BIT)
void genLongToIntCast(GenTree* treeNode);
#endif
// Generate code for a GT_BITCAST that is not contained.
void genCodeForBitCast(GenTreeOp* treeNode);
// Generate the instruction to move a value between register files
void genBitCast(var_types targetType, regNumber targetReg, var_types srcType, regNumber srcReg);
struct GenIntCastDesc
{
enum CheckKind
{
CHECK_NONE,
CHECK_SMALL_INT_RANGE,
CHECK_POSITIVE,
#ifdef TARGET_64BIT
CHECK_UINT_RANGE,
CHECK_POSITIVE_INT_RANGE,
CHECK_INT_RANGE,
#endif
};
enum ExtendKind
{
COPY,
ZERO_EXTEND_SMALL_INT,
SIGN_EXTEND_SMALL_INT,
#ifdef TARGET_64BIT
ZERO_EXTEND_INT,
SIGN_EXTEND_INT,
#endif
};
private:
CheckKind m_checkKind;
unsigned m_checkSrcSize;
int m_checkSmallIntMin;
int m_checkSmallIntMax;
ExtendKind m_extendKind;
unsigned m_extendSrcSize;
public:
GenIntCastDesc(GenTreeCast* cast);
CheckKind CheckKind() const
{
return m_checkKind;
}
unsigned CheckSrcSize() const
{
assert(m_checkKind != CHECK_NONE);
return m_checkSrcSize;
}
int CheckSmallIntMin() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMin;
}
int CheckSmallIntMax() const
{
assert(m_checkKind == CHECK_SMALL_INT_RANGE);
return m_checkSmallIntMax;
}
ExtendKind ExtendKind() const
{
return m_extendKind;
}
unsigned ExtendSrcSize() const
{
return m_extendSrcSize;
}
};
void genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg);
void genIntToIntCast(GenTreeCast* cast);
void genFloatToFloatCast(GenTree* treeNode);
void genFloatToIntCast(GenTree* treeNode);
void genIntToFloatCast(GenTree* treeNode);
void genCkfinite(GenTree* treeNode);
void genCodeForCompare(GenTreeOp* tree);
void genIntrinsic(GenTree* treeNode);
void genPutArgStk(GenTreePutArgStk* treeNode);
void genPutArgReg(GenTreeOp* tree);
#if FEATURE_ARG_SPLIT
void genPutArgSplit(GenTreePutArgSplit* treeNode);
#endif // FEATURE_ARG_SPLIT
#if defined(TARGET_XARCH)
unsigned getBaseVarForPutArgStk(GenTree* treeNode);
#endif // TARGET_XARCH
unsigned getFirstArgWithStackSlot();
void genCompareFloat(GenTree* treeNode);
void genCompareInt(GenTree* treeNode);
#ifdef FEATURE_SIMD
enum SIMDScalarMoveType{
SMT_ZeroInitUpper, // zero initialize target upper bits
SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
SMT_PreserveUpper // preserve target upper bits
};
#ifdef TARGET_ARM64
insOpts genGetSimdInsOpt(emitAttr size, var_types elementType);
#endif
instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival = nullptr);
void genSIMDScalarMove(
var_types targetType, var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
void genSIMDLo64BitConvert(SIMDIntrinsicID intrinsicID,
var_types simdType,
var_types baseType,
regNumber tmpReg,
regNumber tmpIntReg,
regNumber targetReg);
void genSIMDIntrinsic32BitConvert(GenTreeSIMD* simdNode);
void genSIMDIntrinsic64BitConvert(GenTreeSIMD* simdNode);
void genSIMDExtractUpperHalf(GenTreeSIMD* simdNode, regNumber srcReg, regNumber tgtReg);
void genSIMDIntrinsic(GenTreeSIMD* simdNode);
// TYP_SIMD12 (i.e., Vector3 of size 12 bytes) is not a hardware-supported size and requires
// two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
// values through an indirection. Note that Vector3 locals allocated on stack would have
// their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
// Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
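// As a sketch (pseudo-code, not the exact instructions emitted), an indirect TYP_SIMD12 store
// is conceptually split into two accesses:
//     store 8 bytes of the vector register at [addr]   ; elements 0 and 1
//     store 4 bytes (element 2) at [addr + 8]
// Loads mirror this with an 8-byte read plus a 4-byte read that is merged into the register.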
void genStoreIndTypeSIMD12(GenTree* treeNode);
void genLoadIndTypeSIMD12(GenTree* treeNode);
void genStoreLclTypeSIMD12(GenTree* treeNode);
void genLoadLclTypeSIMD12(GenTree* treeNode);
#ifdef TARGET_X86
void genStoreSIMD12ToStack(regNumber operandReg, regNumber tmpReg);
void genPutArgStkSIMD12(GenTree* treeNode);
#endif // TARGET_X86
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void genHWIntrinsic(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
void genHWIntrinsic_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber reg, GenTree* rmOp);
void genHWIntrinsic_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_RM(
GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, GenTree* op2);
void genHWIntrinsic_R_R_RM_I(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr, int8_t ival);
void genHWIntrinsic_R_R_RM_R(GenTreeHWIntrinsic* node, instruction ins, emitAttr attr);
void genHWIntrinsic_R_R_R_RM(
instruction ins, emitAttr attr, regNumber targetReg, regNumber op1Reg, regNumber op2Reg, GenTree* op3);
void genBaseIntrinsic(GenTreeHWIntrinsic* node);
void genX86BaseIntrinsic(GenTreeHWIntrinsic* node);
void genSSEIntrinsic(GenTreeHWIntrinsic* node);
void genSSE2Intrinsic(GenTreeHWIntrinsic* node);
void genSSE41Intrinsic(GenTreeHWIntrinsic* node);
void genSSE42Intrinsic(GenTreeHWIntrinsic* node);
void genAvxOrAvx2Intrinsic(GenTreeHWIntrinsic* node);
void genAESIntrinsic(GenTreeHWIntrinsic* node);
void genBMI1OrBMI2Intrinsic(GenTreeHWIntrinsic* node);
void genFMAIntrinsic(GenTreeHWIntrinsic* node);
void genLZCNTIntrinsic(GenTreeHWIntrinsic* node);
void genPCLMULQDQIntrinsic(GenTreeHWIntrinsic* node);
void genPOPCNTIntrinsic(GenTreeHWIntrinsic* node);
void genXCNTIntrinsic(GenTreeHWIntrinsic* node, instruction ins);
template <typename HWIntrinsicSwitchCaseBody>
void genHWIntrinsicJumpTableFallback(NamedIntrinsic intrinsic,
regNumber nonConstImmReg,
regNumber baseReg,
regNumber offsReg,
HWIntrinsicSwitchCaseBody emitSwCase);
#endif // defined(TARGET_XARCH)
#ifdef TARGET_ARM64
class HWIntrinsicImmOpHelper final
{
public:
HWIntrinsicImmOpHelper(CodeGen* codeGen, GenTree* immOp, GenTreeHWIntrinsic* intrin);
void EmitBegin();
void EmitCaseEnd();
// Returns true after the last call to EmitCaseEnd() (i.e. this signals that code generation is done).
bool Done() const
{
return (immValue > immUpperBound);
}
// Returns a value of the immediate operand that should be used for a case.
int ImmValue() const
{
return immValue;
}
private:
// Returns true if immOp is a non-contained immediate (i.e. the value of the immediate operand is enregistered
// in nonConstImmReg).
bool NonConstImmOp() const
{
return nonConstImmReg != REG_NA;
}
// Returns true if a non-constant immediate operand can be either 0 or 1.
bool TestImmOpZeroOrOne() const
{
assert(NonConstImmOp());
return (immLowerBound == 0) && (immUpperBound == 1);
}
emitter* GetEmitter() const
{
return codeGen->GetEmitter();
}
CodeGen* const codeGen;
BasicBlock* endLabel;
BasicBlock* nonZeroLabel;
int immValue;
int immLowerBound;
int immUpperBound;
regNumber nonConstImmReg;
regNumber branchTargetReg;
};
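// A sketch of the intended usage pattern, based on the comments above (real call sites may
// differ in detail):
//
//     HWIntrinsicImmOpHelper helper(this, immOp, node);
//     for (helper.EmitBegin(); !helper.Done(); helper.EmitCaseEnd())
//     {
//         const int imm = helper.ImmValue();
//         // emit the instruction form that encodes 'imm' for this case
//     }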
#endif // TARGET_ARM64
#endif // FEATURE_HW_INTRINSICS
#if !defined(TARGET_64BIT)
// CodeGen for Long Ints
void genStoreLongLclVar(GenTree* treeNode);
#endif // !defined(TARGET_64BIT)
// Do liveness update for register produced by the current node in codegen after
// code has been emitted for it.
void genProduceReg(GenTree* tree);
void genSpillLocal(unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum);
void genUnspillLocal(
unsigned varNum, var_types type, GenTreeLclVar* lclNode, regNumber regNum, bool reSpill, bool isLastUse);
void genUnspillRegIfNeeded(GenTree* tree);
void genUnspillRegIfNeeded(GenTree* tree, unsigned multiRegIndex);
regNumber genConsumeReg(GenTree* tree);
regNumber genConsumeReg(GenTree* tree, unsigned multiRegIndex);
void genCopyRegIfNeeded(GenTree* tree, regNumber needReg);
void genConsumeRegAndCopy(GenTree* tree, regNumber needReg);
void genConsumeIfReg(GenTree* tree)
{
if (!tree->isContained())
{
(void)genConsumeReg(tree);
}
}
void genRegCopy(GenTree* tree);
regNumber genRegCopy(GenTree* tree, unsigned multiRegIndex);
void genTransferRegGCState(regNumber dst, regNumber src);
void genConsumeAddress(GenTree* addr);
void genConsumeAddrMode(GenTreeAddrMode* mode);
void genSetBlockSize(GenTreeBlk* blkNode, regNumber sizeReg);
void genConsumeBlockSrc(GenTreeBlk* blkNode);
void genSetBlockSrc(GenTreeBlk* blkNode, regNumber srcReg);
void genConsumeBlockOp(GenTreeBlk* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
#ifdef FEATURE_PUT_STRUCT_ARG_STK
void genConsumePutStructArgStk(GenTreePutArgStk* putArgStkNode,
regNumber dstReg,
regNumber srcReg,
regNumber sizeReg);
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if FEATURE_ARG_SPLIT
void genConsumeArgSplitStruct(GenTreePutArgSplit* putArgNode);
#endif // FEATURE_ARG_SPLIT
void genConsumeRegs(GenTree* tree);
void genConsumeOperands(GenTreeOp* tree);
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void genConsumeMultiOpOperands(GenTreeMultiOp* tree);
#endif
void genEmitGSCookieCheck(bool pushReg);
void genCodeForShift(GenTree* tree);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genCodeForShiftLong(GenTree* tree);
#endif
#ifdef TARGET_XARCH
void genCodeForShiftRMW(GenTreeStoreInd* storeInd);
void genCodeForBT(GenTreeOp* bt);
#endif // TARGET_XARCH
void genCodeForCast(GenTreeOp* tree);
void genCodeForLclAddr(GenTreeLclVarCommon* lclAddrNode);
void genCodeForIndexAddr(GenTreeIndexAddr* tree);
void genCodeForIndir(GenTreeIndir* tree);
void genCodeForNegNot(GenTree* tree);
void genCodeForBswap(GenTree* tree);
void genCodeForLclVar(GenTreeLclVar* tree);
void genCodeForLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclFld(GenTreeLclFld* tree);
void genCodeForStoreLclVar(GenTreeLclVar* tree);
void genCodeForReturnTrap(GenTreeOp* tree);
void genCodeForStoreInd(GenTreeStoreInd* tree);
void genCodeForSwap(GenTreeOp* tree);
void genCodeForCpObj(GenTreeObj* cpObjNode);
void genCodeForCpBlkRepMovs(GenTreeBlk* cpBlkNode);
void genCodeForCpBlkUnroll(GenTreeBlk* cpBlkNode);
#ifndef TARGET_X86
void genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode);
#endif
void genCodeForPhysReg(GenTreePhysReg* tree);
void genCodeForNullCheck(GenTreeIndir* tree);
void genCodeForCmpXchg(GenTreeCmpXchg* tree);
void genAlignStackBeforeCall(GenTreePutArgStk* putArgStk);
void genAlignStackBeforeCall(GenTreeCall* call);
void genRemoveAlignmentAfterCall(GenTreeCall* call, unsigned bias = 0);
#if defined(UNIX_X86_ABI)
unsigned curNestedAlignment; // Keep track of alignment adjustment required during codegen.
unsigned maxNestedAlignment; // The maximum amount of alignment adjustment required.
void SubtractNestedAlignment(unsigned adjustment)
{
assert(curNestedAlignment >= adjustment);
unsigned newNestedAlignment = curNestedAlignment - adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
}
void AddNestedAlignment(unsigned adjustment)
{
unsigned newNestedAlignment = curNestedAlignment + adjustment;
if (curNestedAlignment != newNestedAlignment)
{
JITDUMP("Adjusting stack nested alignment from %d to %d\n", curNestedAlignment, newNestedAlignment);
}
curNestedAlignment = newNestedAlignment;
if (curNestedAlignment > maxNestedAlignment)
{
JITDUMP("Max stack nested alignment changed from %d to %d\n", maxNestedAlignment, curNestedAlignment);
maxNestedAlignment = curNestedAlignment;
}
}
#endif
#ifndef TARGET_X86
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk, unsigned outArgVarNum);
#endif // !TARGET_X86
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool genAdjustStackForPutArgStk(GenTreePutArgStk* putArgStk);
void genPushReg(var_types type, regNumber srcReg);
void genPutArgStkFieldList(GenTreePutArgStk* putArgStk);
#endif // TARGET_X86
void genPutStructArgStk(GenTreePutArgStk* treeNode);
unsigned genMove8IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove4IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove2IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
unsigned genMove1IfNeeded(unsigned size, regNumber tmpReg, GenTree* srcAddr, unsigned offset);
void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
void genStoreRegToStackArg(var_types type, regNumber reg, int offset);
void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode);
void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode);
#ifdef TARGET_X86
void genStructPutArgPush(GenTreePutArgStk* putArgStkNode);
#else
void genStructPutArgPartialRepMovs(GenTreePutArgStk* putArgStkNode);
#endif
#endif // FEATURE_PUT_STRUCT_ARG_STK
void genCodeForStoreBlk(GenTreeBlk* storeBlkNode);
#ifndef TARGET_X86
void genCodeForInitBlkHelper(GenTreeBlk* initBlkNode);
#endif
void genCodeForInitBlkRepStos(GenTreeBlk* initBlkNode);
void genCodeForInitBlkUnroll(GenTreeBlk* initBlkNode);
void genJumpTable(GenTree* tree);
void genTableBasedSwitch(GenTree* tree);
void genCodeForArrIndex(GenTreeArrIndex* treeNode);
void genCodeForArrOffset(GenTreeArrOffs* treeNode);
#if defined(TARGET_LOONGARCH64)
instruction genGetInsForOper(GenTree* treeNode);
#else
instruction genGetInsForOper(genTreeOps oper, var_types type);
#endif
bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data);
GenTree* getCallTarget(const GenTreeCall* call, CORINFO_METHOD_HANDLE* methHnd);
regNumber getCallIndirectionCellReg(const GenTreeCall* call);
void genCall(GenTreeCall* call);
void genCallInstruction(GenTreeCall* call X86_ARG(target_ssize_t stackArgBytes));
void genJmpMethod(GenTree* jmp);
BasicBlock* genCallFinally(BasicBlock* block);
void genCodeForJumpTrue(GenTreeOp* jtrue);
#if defined(TARGET_LOONGARCH64)
// TODO: refactor for LA.
void genCodeForJumpCompare(GenTreeOp* tree);
#endif
#if defined(TARGET_ARM64)
void genCodeForJumpCompare(GenTreeOp* tree);
void genCodeForMadd(GenTreeOp* tree);
void genCodeForMsub(GenTreeOp* tree);
void genCodeForBfiz(GenTreeOp* tree);
void genCodeForAddEx(GenTreeOp* tree);
void genCodeForCond(GenTreeOp* tree);
#endif // TARGET_ARM64
#if defined(FEATURE_EH_FUNCLETS)
void genEHCatchRet(BasicBlock* block);
#else // !FEATURE_EH_FUNCLETS
void genEHFinallyOrFilterRet(BasicBlock* block);
#endif // !FEATURE_EH_FUNCLETS
void genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode);
void genMultiRegStoreToLocal(GenTreeLclVar* lclNode);
#if defined(TARGET_LOONGARCH64)
void genMultiRegCallStoreToLocal(GenTree* treeNode);
#endif
// Codegen for multi-register struct returns.
bool isStructReturn(GenTree* treeNode);
#ifdef FEATURE_SIMD
void genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc);
#endif
void genStructReturn(GenTree* treeNode);
#if defined(TARGET_X86) || defined(TARGET_ARM)
void genLongReturn(GenTree* treeNode);
#endif // TARGET_X86 || TARGET_ARM
#if defined(TARGET_X86)
void genFloatReturn(GenTree* treeNode);
#endif // TARGET_X86
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
void genSimpleReturn(GenTree* treeNode);
#endif // TARGET_ARM64 || TARGET_LOONGARCH64
void genReturn(GenTree* treeNode);
void genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp);
void genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp);
target_ssize_t genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp);
#if defined(TARGET_XARCH)
void genStackPointerDynamicAdjustmentWithProbe(regNumber regSpDelta, regNumber regTmp);
#endif // defined(TARGET_XARCH)
void genLclHeap(GenTree* tree);
bool genIsRegCandidateLocal(GenTree* tree)
{
if (!tree->IsLocal())
{
return false;
}
return compiler->lvaGetDesc(tree->AsLclVarCommon())->lvIsRegCandidate();
}
#ifdef FEATURE_PUT_STRUCT_ARG_STK
#ifdef TARGET_X86
bool m_pushStkArg;
#else // !TARGET_X86
unsigned m_stkArgVarNum;
unsigned m_stkArgOffset;
#endif // !TARGET_X86
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if defined(DEBUG) && defined(TARGET_XARCH)
void genStackPointerCheck(bool doStackPointerCheck, unsigned lvaStackPointerVar);
#endif // defined(DEBUG) && defined(TARGET_XARCH)
#ifdef DEBUG
GenTree* lastConsumedNode;
void genNumberOperandUse(GenTree* const operand, int& useNum) const;
void genCheckConsumeNode(GenTree* const node);
#else // !DEBUG
inline void genCheckConsumeNode(GenTree* treeNode)
{
}
#endif // DEBUG
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Instruction XX
XX XX
XX The interface to generate a machine-instruction. XX
XX Currently specific to x86 XX
XX TODO-Cleanup: Consider factoring this out of CodeGen XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
public:
void instGen(instruction ins);
void inst_JMP(emitJumpKind jmp, BasicBlock* tgtBlock);
void inst_SET(emitJumpKind condition, regNumber reg);
void inst_RV(instruction ins, regNumber reg, var_types type, emitAttr size = EA_UNKNOWN);
void inst_Mov(var_types dstType,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_Mov_Extend(var_types srcType,
bool srcInReg,
regNumber dstReg,
regNumber srcReg,
bool canSkip,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
var_types type = TYP_I_IMPL,
emitAttr size = EA_UNKNOWN,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_RV_RV_RV(instruction ins,
regNumber reg1,
regNumber reg2,
regNumber reg3,
emitAttr size,
insFlags flags = INS_FLAGS_DONT_CARE);
void inst_IV(instruction ins, cnsval_ssize_t val);
void inst_IV_handle(instruction ins, cnsval_ssize_t val);
void inst_RV_IV(
instruction ins, regNumber reg, target_ssize_t val, emitAttr size, insFlags flags = INS_FLAGS_DONT_CARE);
void inst_ST_RV(instruction ins, TempDsc* tmp, unsigned ofs, regNumber reg, var_types type);
void inst_FS_ST(instruction ins, emitAttr size, TempDsc* tmp, unsigned ofs);
void inst_TT_RV(instruction ins, emitAttr size, GenTree* tree, regNumber reg);
void inst_RV_SH(instruction ins, emitAttr size, regNumber reg, unsigned val, insFlags flags = INS_FLAGS_DONT_CARE);
#if defined(TARGET_XARCH)
enum class OperandKind{
ClsVar, // [CLS_VAR_ADDR] - "C" in the emitter.
Local, // [Local or spill temp + offset] - "S" in the emitter.
Indir, // [base+index*scale+disp] - "A" in the emitter.
Imm, // immediate - "I" in the emitter.
Reg // reg - "R" in the emitter.
};
class OperandDesc
{
OperandKind m_kind;
union {
struct
{
CORINFO_FIELD_HANDLE m_fieldHnd;
};
struct
{
int m_varNum;
uint16_t m_offset;
};
struct
{
GenTree* m_addr;
GenTreeIndir* m_indir;
var_types m_indirType;
};
struct
{
ssize_t m_immediate;
bool m_immediateNeedsReloc;
};
struct
{
regNumber m_reg;
};
};
public:
OperandDesc(CORINFO_FIELD_HANDLE fieldHnd) : m_kind(OperandKind::ClsVar), m_fieldHnd(fieldHnd)
{
}
OperandDesc(int varNum, uint16_t offset) : m_kind(OperandKind::Local), m_varNum(varNum), m_offset(offset)
{
}
OperandDesc(GenTreeIndir* indir)
: m_kind(OperandKind::Indir), m_addr(indir->Addr()), m_indir(indir), m_indirType(indir->TypeGet())
{
}
OperandDesc(var_types indirType, GenTree* addr)
: m_kind(OperandKind::Indir), m_addr(addr), m_indir(nullptr), m_indirType(indirType)
{
}
OperandDesc(ssize_t immediate, bool immediateNeedsReloc)
: m_kind(OperandKind::Imm), m_immediate(immediate), m_immediateNeedsReloc(immediateNeedsReloc)
{
}
OperandDesc(regNumber reg) : m_kind(OperandKind::Reg), m_reg(reg)
{
}
OperandKind GetKind() const
{
return m_kind;
}
CORINFO_FIELD_HANDLE GetFieldHnd() const
{
assert(m_kind == OperandKind::ClsVar);
return m_fieldHnd;
}
int GetVarNum() const
{
assert(m_kind == OperandKind::Local);
return m_varNum;
}
int GetLclOffset() const
{
assert(m_kind == OperandKind::Local);
return m_offset;
}
// TODO-Cleanup: instead of this rather unsightly workaround with
// "indirForm", create a new abstraction for address modes to pass
// to the emitter (or at least just use "addr"...).
GenTreeIndir* GetIndirForm(GenTreeIndir* pIndirForm)
{
if (m_indir == nullptr)
{
GenTreeIndir indirForm = CodeGen::indirForm(m_indirType, m_addr);
memcpy(pIndirForm, &indirForm, sizeof(GenTreeIndir));
}
else
{
pIndirForm = m_indir;
}
return pIndirForm;
}
ssize_t GetImmediate() const
{
assert(m_kind == OperandKind::Imm);
return m_immediate;
}
emitAttr GetEmitAttrForImmediate(emitAttr baseAttr) const
{
assert(m_kind == OperandKind::Imm);
return m_immediateNeedsReloc ? EA_SET_FLG(baseAttr, EA_CNS_RELOC_FLG) : baseAttr;
}
regNumber GetReg() const
{
return m_reg;
}
bool IsContained() const
{
return m_kind != OperandKind::Reg;
}
};
OperandDesc genOperandDesc(GenTree* op);
void inst_TT(instruction ins, emitAttr size, GenTree* op1);
void inst_RV_TT(instruction ins, emitAttr size, regNumber op1Reg, GenTree* op2);
void inst_RV_RV_IV(instruction ins, emitAttr size, regNumber reg1, regNumber reg2, unsigned ival);
void inst_RV_TT_IV(instruction ins, emitAttr attr, regNumber reg1, GenTree* rmOp, int ival);
void inst_RV_RV_TT(instruction ins, emitAttr size, regNumber targetReg, regNumber op1Reg, GenTree* op2, bool isRMW);
#endif
void inst_set_SV_var(GenTree* tree);
#ifdef TARGET_ARM
bool arm_Valid_Imm_For_Instr(instruction ins, target_ssize_t imm, insFlags flags);
bool arm_Valid_Imm_For_Add(target_ssize_t imm, insFlags flag);
bool arm_Valid_Imm_For_Add_SP(target_ssize_t imm);
#endif
instruction ins_Move_Extend(var_types srcType, bool srcInReg);
instruction ins_Copy(var_types dstType);
instruction ins_Copy(regNumber srcReg, var_types dstType);
instruction ins_FloatConv(var_types to, var_types from);
instruction ins_MathOp(genTreeOps oper, var_types type);
void instGen_Return(unsigned stkArgSize);
enum BarrierKind
{
BARRIER_FULL, // full barrier
BARRIER_LOAD_ONLY, // load barrier
};
void instGen_MemoryBarrier(BarrierKind barrierKind = BARRIER_FULL);
void instGen_Set_Reg_To_Zero(emitAttr size, regNumber reg, insFlags flags = INS_FLAGS_DONT_CARE);
void instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags = INS_FLAGS_DONT_CARE DEBUGARG(size_t targetHandle = 0)
DEBUGARG(GenTreeFlags gtFlags = GTF_EMPTY));
#ifdef TARGET_XARCH
instruction genMapShiftInsToShiftByConstantIns(instruction ins, int shiftByValue);
#endif // TARGET_XARCH
#ifndef TARGET_LOONGARCH64
// Maps a GenCondition code to a sequence of conditional jumps or other conditional instructions
// such as X86's SETcc. A sequence of instructions rather than just a single one is required for
// certain floating point conditions.
// For example, X86's UCOMISS sets ZF to indicate equality but it also sets it, together with PF,
// to indicate an unordered result. So for GenCondition::FEQ we first need to check if PF is 0
// and then jump if ZF is 1:
// JP fallThroughBlock
// JE jumpDestBlock
// fallThroughBlock:
// ...
// jumpDestBlock:
//
// This is very similar to the way shortcircuit evaluation of bool AND and OR operators works so
// in order to make the GenConditionDesc mapping tables easier to read, a bool expression-like
// pattern is used to encode the above:
// { EJ_jnp, GT_AND, EJ_je }
// { EJ_jp, GT_OR, EJ_jne }
//
// For more details check inst_JCC and inst_SETCC functions.
//
struct GenConditionDesc
{
emitJumpKind jumpKind1;
genTreeOps oper;
emitJumpKind jumpKind2;
char padTo4Bytes;
static const GenConditionDesc& Get(GenCondition condition)
{
assert(condition.GetCode() < ArrLen(map));
const GenConditionDesc& desc = map[condition.GetCode()];
assert(desc.jumpKind1 != EJ_NONE);
assert((desc.oper == GT_NONE) || (desc.oper == GT_AND) || (desc.oper == GT_OR));
assert((desc.oper == GT_NONE) == (desc.jumpKind2 == EJ_NONE));
return desc;
}
private:
static const GenConditionDesc map[32];
};
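// Loose sketch of how the descriptor is consumed (the exact label/reversal handling lives in
// inst_JCC and inst_SETCC):
//
//     const GenConditionDesc& desc = GenConditionDesc::Get(condition);
//     // GT_NONE: emit desc.jumpKind1 straight to the target.
//     // GT_OR:   emit desc.jumpKind1 and then desc.jumpKind2, both to the target.
//     // GT_AND:  emit the reverse of desc.jumpKind1 to a fall-through label, then
//     //          desc.jumpKind2 to the target (the JP/JE example above).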
void inst_JCC(GenCondition condition, BasicBlock* target);
void inst_SETCC(GenCondition condition, var_types type, regNumber dstReg);
void genCodeForJcc(GenTreeCC* tree);
void genCodeForSetcc(GenTreeCC* setcc);
#endif // !TARGET_LOONGARCH64
};
// A simple phase that just invokes a method on the codegen instance
//
class CodeGenPhase final : public Phase
{
public:
CodeGenPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
: Phase(_codeGen->GetCompiler(), _phase), codeGen(_codeGen), action(_action)
{
}
protected:
virtual PhaseStatus DoPhase() override
{
(codeGen->*action)();
return PhaseStatus::MODIFIED_EVERYTHING;
}
private:
CodeGen* codeGen;
void (CodeGen::*action)();
};
// Wrapper for using CodeGenPhase
//
inline void DoPhase(CodeGen* _codeGen, Phases _phase, void (CodeGen::*_action)())
{
CodeGenPhase phase(_codeGen, _phase, _action);
phase.Run();
}
#endif // _CODEGEN_H_
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
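A rough C++ equivalent of what the new `and/negs/and/csneg` sequence computes for a signed `a % 16` (illustrative only; the JIT performs this transformation on its IR, and the names below are not from the JIT):
```cpp
// Branchless signed a % 16, mirroring the csneg-based sequence above.
int Mod16(int a)
{
    int posRem = a & 15;                          // remainder when a >= 0
    int negRem = (int)((0u - (unsigned)a) & 15u); // remainder magnitude when a < 0 (wrap-safe negate)
    return (a > 0) ? posRem : -negRem;            // csneg selects or negates based on the sign of -a
}
```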
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/codegenarm64.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Arm64 Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARM64
#include "emit.h"
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
#include "patchpointinfo.h"
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
regMaskTP rsRestoreRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
if (isFramePointerUsed())
{
rsRestoreRegs |= RBM_FPBASE;
}
rsRestoreRegs |= RBM_LR; // We must save/restore the return address (in the LR register)
regMaskTP regsToRestoreMask = rsRestoreRegs;
const int totalFrameSize = genTotalFrameSize();
// Fetch info about the frame we saved when creating the prolog.
//
const int frameType = compiler->compFrameInfo.frameType;
const int calleeSaveSpOffset = compiler->compFrameInfo.calleeSaveSpOffset;
const int calleeSaveSpDelta = compiler->compFrameInfo.calleeSaveSpDelta;
const int offsetSpToSavedFp = compiler->compFrameInfo.offsetSpToSavedFp;
switch (frameType)
{
case 1:
{
JITDUMP("Frame type 1. #outsz=0; #framesz=%d; localloc? %s\n", totalFrameSize,
dspBool(compiler->compLocallocUsed));
if (compiler->compLocallocUsed)
{
// Restore sp from fp
// mov sp, fp
inst_Mov(TYP_I_IMPL, REG_SPBASE, REG_FPBASE, /* canSkip */ false);
compiler->unwindSetFrameReg(REG_FPBASE, 0);
}
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and post-index SP.
break;
}
case 2:
{
JITDUMP("Frame type 2 (save FP/LR at bottom). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
if (compiler->compLocallocUsed)
{
// Restore sp from fp
// sub sp, fp, #outsz // Uses #outsz if FP/LR stored at bottom
int SPtoFPdelta = genSPtoFPdelta();
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and post-index SP.
break;
}
case 3:
{
JITDUMP("Frame type 3 (save FP/LR at bottom). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
JITDUMP(" calleeSaveSpDelta=%d\n", calleeSaveSpDelta);
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and (hopefully) post-index SP.
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
if (compiler->lvaOutgoingArgSpaceSize > 504)
{
// We can't do "ldp fp,lr,[sp,#outsz]" because #outsz is too big.
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
int spAdjustment2 = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == REGSIZE_BYTES));
// Restore sp from fp. No need to update sp after this since we've set up fp before adjusting sp
// in prolog.
// sub sp, fp, #alignmentAdjustment2
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, alignmentAdjustment2);
compiler->unwindSetFrameReg(REG_FPBASE, alignmentAdjustment2);
// Generate:
// ldp fp,lr,[sp]
// add sp,sp,#remainingFrameSz
JITDUMP(" alignmentAdjustment2=%d\n", alignmentAdjustment2);
genEpilogRestoreRegPair(REG_FP, REG_LR, alignmentAdjustment2, spAdjustment2, false, REG_IP1, nullptr);
}
else
{
if (compiler->compLocallocUsed)
{
// Restore sp from fp; here that's #outsz from SP
// sub sp, fp, #outsz
int SPtoFPdelta = genSPtoFPdelta();
assert(SPtoFPdelta == (int)compiler->lvaOutgoingArgSpaceSize);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
// Generate:
// ldp fp,lr,[sp,#outsz]
// add sp,sp,#remainingFrameSz ; might need to load this constant in a scratch register if
// ; it's large
JITDUMP(" remainingFrameSz=%d\n", remainingFrameSz);
genEpilogRestoreRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, remainingFrameSz, false,
REG_IP1, nullptr);
}
// Unlike frameType=1 or frameType=2 that restore SP at the end,
// frameType=3 already adjusted SP above to delete local frame.
// There is at most one alignment slot between SP and where we store the callee-saved registers.
assert((calleeSaveSpOffset == 0) || (calleeSaveSpOffset == REGSIZE_BYTES));
break;
}
case 4:
{
JITDUMP("Frame type 4 (save FP/LR at top). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert(genSaveFpLrWithAllCalleeSavedRegisters);
if (compiler->compLocallocUsed)
{
// Restore sp from fp
// sub sp, fp, #outsz // Uses #outsz if FP/LR stored at bottom
int SPtoFPdelta = genSPtoFPdelta();
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
break;
}
case 5:
{
JITDUMP("Frame type 5 (save FP/LR at top). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert((calleeSaveSpOffset == 0) || (calleeSaveSpOffset == REGSIZE_BYTES));
// Restore sp from fp:
// sub sp, fp, #sp-to-fp-delta
// This is the same whether there is localloc or not. Note that we don't need to do anything to remove the
// "remainingFrameSz" to reverse the SUB of that amount in the prolog.
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, offsetSpToSavedFp);
compiler->unwindSetFrameReg(REG_FPBASE, offsetSpToSavedFp);
break;
}
default:
unreached();
}
JITDUMP(" calleeSaveSpOffset=%d, calleeSaveSpDelta=%d\n", calleeSaveSpOffset, calleeSaveSpDelta);
genRestoreCalleeSavedRegistersHelp(regsToRestoreMask, calleeSaveSpOffset, calleeSaveSpDelta);
switch (frameType)
{
case 1:
{
// Generate:
// ldp fp,lr,[sp],#framesz
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, totalFrameSize,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
break;
}
case 2:
{
// Generate:
// ldp fp,lr,[sp,#outsz]
// add sp,sp,#framesz
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
break;
}
case 3:
case 4:
case 5:
{
// Nothing to do after restoring callee-saved registers.
break;
}
default:
{
unreached();
}
}
// For OSR, we must also adjust the SP to remove the Tier0 frame.
//
if (compiler->opts.IsOSR())
{
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
JITDUMP("Extra SP adjust for OSR to pop off Tier0 frame: %d bytes\n", tier0FrameSize);
// Tier0 size may exceed simple immediate. We're in the epilog so not clear if we can
// use a scratch reg. So just do two adds if necessary.
//
int spAdjust = tier0FrameSize;
if (!GetEmitter()->emitIns_valid_imm_for_add(tier0FrameSize, EA_PTRSIZE))
{
const int lowPart = spAdjust & 0xFFF;
const int highPart = spAdjust - lowPart;
assert(GetEmitter()->emitIns_valid_imm_for_add(highPart, EA_PTRSIZE));
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, highPart);
compiler->unwindAllocStack(highPart);
spAdjust = lowPart;
}
assert(GetEmitter()->emitIns_valid_imm_for_add(spAdjust, EA_PTRSIZE));
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, spAdjust);
compiler->unwindAllocStack(spAdjust);
}
}
//------------------------------------------------------------------------
// genInstrWithConstant: we will typically generate one instruction
//
// ins reg1, reg2, imm
//
// However the imm might not fit as a directly encodable immediate,
// when it doesn't fit we generate extra instruction(s) that sets up
// the 'regTmp' with the proper immediate value.
//
// mov regTmp, imm
// ins reg1, reg2, regTmp
//
// Arguments:
// ins - instruction
// attr - operation size and GC attribute
// reg1, reg2 - first and second register operands
// imm - immediate value (third operand when it fits)
// tmpReg - temp register to use when the 'imm' doesn't fit. Can be REG_NA
// if caller knows for certain the constant will fit.
// inUnwindRegion - true if we are in a prolog/epilog region with unwind codes.
// Default: false.
//
// Return Value:
// returns true if the immediate was small enough to be encoded inside instruction. If not,
// returns false meaning the immediate was too large and tmpReg was used and modified.
//
bool CodeGen::genInstrWithConstant(instruction ins,
emitAttr attr,
regNumber reg1,
regNumber reg2,
ssize_t imm,
regNumber tmpReg,
bool inUnwindRegion /* = false */)
{
bool immFitsInIns = false;
emitAttr size = EA_SIZE(attr);
// reg1 is usually a dest register
// reg2 is always source register
assert(tmpReg != reg2); // regTmp can not match any source register
switch (ins)
{
case INS_add:
case INS_sub:
if (imm < 0)
{
imm = -imm;
ins = (ins == INS_add) ? INS_sub : INS_add;
}
immFitsInIns = emitter::emitIns_valid_imm_for_add(imm, size);
break;
case INS_strb:
case INS_strh:
case INS_str:
// reg1 is a source register for store instructions
assert(tmpReg != reg1); // regTmp can not match any source register
immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
break;
case INS_ldrsb:
case INS_ldrsh:
case INS_ldrsw:
case INS_ldrb:
case INS_ldrh:
case INS_ldr:
immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
break;
default:
assert(!"Unexpected instruction in genInstrWithConstant");
break;
}
if (immFitsInIns)
{
// generate a single instruction that encodes the immediate directly
GetEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, imm);
}
else
{
// caller can specify REG_NA for tmpReg, when it "knows" that the immediate will always fit
assert(tmpReg != REG_NA);
// generate two or more instructions
// first we load the immediate into tmpReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, imm);
regSet.verifyRegUsed(tmpReg);
// when we are in an unwind code region
// we record the extra instructions using unwindPadding()
if (inUnwindRegion)
{
compiler->unwindPadding();
}
// generate the instruction using a three register encoding with the immediate in tmpReg
GetEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
}
return immFitsInIns;
}
//------------------------------------------------------------------------
// genStackPointerAdjustment: add a specified constant value to the stack pointer in either the prolog
// or the epilog. The unwind codes for the generated instructions are produced. An available temporary
// register is required to be specified, in case the constant is too large to encode in an "add"
// instruction (or "sub" instruction if we choose to use one), such that we need to load the constant
// into a register first, before using it.
//
// Arguments:
// spDelta - the value to add to SP (can be negative)
// tmpReg - an available temporary register
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
// reportUnwindData - If true, report the change in unwind data. Otherwise, do not report it.
//
// Return Value:
// None.
void CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg, bool* pTmpRegIsZero, bool reportUnwindData)
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
//
bool wasTempRegisterUsedForImm =
!genInstrWithConstant(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, spDelta, tmpReg, true);
if (wasTempRegisterUsedForImm)
{
if (pTmpRegIsZero != nullptr)
{
*pTmpRegIsZero = false;
}
}
if (reportUnwindData)
{
// spDelta is negative in the prolog, positive in the epilog, but we always tell the unwind codes the positive
// value.
ssize_t spDeltaAbs = abs(spDelta);
unsigned unwindSpDelta = (unsigned)spDeltaAbs;
assert((ssize_t)unwindSpDelta == spDeltaAbs); // make sure that it fits in an unsigned
compiler->unwindAllocStack(unwindSpDelta);
}
}
//------------------------------------------------------------------------
// genPrologSaveRegPair: Save a pair of general-purpose or floating-point/SIMD registers in a function or funclet
// prolog. If possible, we use pre-indexed addressing to adjust SP and store the registers with a single instruction.
// The caller must ensure that we can use the STP instruction, and that spOffset will be in the legal range for that
// instruction.
//
// Arguments:
// reg1 - First register of pair to save.
// reg2 - Second register of pair to save.
// spOffset - The offset from SP to store reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP before the register saves (must be negative or
// zero).
// useSaveNextPair - True if the last prolog instruction was to save the previous register pair. This
// allows us to emit the "save_next" unwind code.
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
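//
// For example (an illustrative sketch; registers and offsets are arbitrary): with spOffset=16 and
// spDelta=0 this emits "stp x19, x20, [sp, #16]"; with spOffset=0 and spDelta=-96 the SP change is
// folded into the store as "stp x19, x20, [sp, #-96]!".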
void CodeGen::genPrologSaveRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta <= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
assert(genIsValidFloatReg(reg1) == genIsValidFloatReg(reg2)); // registers must be both general-purpose, or both
// FP/SIMD
bool needToSaveRegs = true;
if (spDelta != 0)
{
assert(!useSaveNextPair);
if ((spOffset == 0) && (spDelta >= -512))
{
// We can use pre-indexed addressing.
// stp REG, REG + 1, [SP, #spDelta]!
// 64-bit STP offset range: -512 to 504, multiple of 8.
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(reg1, reg2, spDelta);
needToSaveRegs = false;
}
else // (spOffset != 0) || (spDelta < -512)
{
// We need to do the SP adjustment separately from the store; we can't combine pre-indexed addressing with a
// non-zero offset.
// generate sub SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
if (needToSaveRegs)
{
// stp REG, REG + 1, [SP, #offset]
// 64-bit STP offset range: -512 to 504, multiple of 8.
assert(spOffset <= 504);
assert((spOffset % 8) == 0);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
if (TargetOS::IsUnix && compiler->generateCFIUnwindCodes())
{
useSaveNextPair = false;
}
if (useSaveNextPair)
{
// This works as long as we've only been saving pairs, in order, and we've saved the previous one just
// before this one.
compiler->unwindSaveNext();
}
else
{
compiler->unwindSaveRegPair(reg1, reg2, spOffset);
}
}
}
//------------------------------------------------------------------------
// genPrologSaveReg: Like genPrologSaveRegPair, but for a single register. Save a single general-purpose or
// floating-point/SIMD register in a function or funclet prolog. Note that if we wish to change SP (i.e., spDelta != 0),
// then spOffset must be 8. This is because otherwise we would create an alignment hole above the saved register, not
// below it, which we currently don't support. This restriction could be loosened if the callers change to handle it
// (and this function changes to support using pre-indexed STR addressing). The caller must ensure that we can use the
// STR instruction, and that spOffset will be in the legal range for that instruction.
//
// Arguments:
// reg1 - Register to save.
// spOffset - The offset from SP to store reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP before the register saves (must be negative or
// zero).
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
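//
// For example (an illustrative sketch): with spOffset=8 and spDelta=0 this emits
// "str x21, [sp, #8]"; a non-zero spDelta that can't be folded into the store is applied first
// with a separate SP adjustment.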
void CodeGen::genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta <= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
bool needToSaveRegs = true;
if (spDelta != 0)
{
if ((spOffset == 0) && (spDelta >= -256))
{
// We can use pre-index addressing.
// str REG, [SP, #spDelta]!
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPreindexed(reg1, spDelta);
needToSaveRegs = false;
}
else // (spOffset != 0) || (spDelta < -256)
{
// generate sub SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
if (needToSaveRegs)
{
// str REG, [SP, #offset]
// 64-bit STR offset range: 0 to 32760, multiple of 8.
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
}
}
//------------------------------------------------------------------------
// genEpilogRestoreRegPair: This is the opposite of genPrologSaveRegPair(), run in the epilog instead of the prolog.
// The stack pointer adjustment, if requested, is done after the register restore, using post-index addressing.
// The caller must ensure that we can use the LDP instruction, and that spOffset will be in the legal range for that
// instruction.
//
// Arguments:
// reg1 - First register of pair to restore.
// reg2 - Second register of pair to restore.
// spOffset - The offset from SP to load reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP after the register restores (must be positive or
// zero).
// useSaveNextPair - True if the last prolog instruction was to save the previous register pair. This
// allows us to emit the "save_next" unwind code.
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
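//
// For example (an illustrative sketch): the prolog's "stp x19, x20, [sp, #-96]!" is undone here
// with "ldp x19, x20, [sp], #96" (spOffset=0, spDelta=96), folding the SP restore into the load.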
void CodeGen::genEpilogRestoreRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta >= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
assert(genIsValidFloatReg(reg1) == genIsValidFloatReg(reg2)); // registers must be both general-purpose, or both
// FP/SIMD
if (spDelta != 0)
{
assert(!useSaveNextPair);
if ((spOffset == 0) && (spDelta <= 504))
{
// Fold the SP change into this instruction.
// ldp reg1, reg2, [SP], #spDelta
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(reg1, reg2, -spDelta);
}
else // (spOffset != 0) || (spDelta > 504)
{
// Can't fold in the SP change; need to use a separate ADD instruction.
// ldp reg1, reg2, [SP, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
compiler->unwindSaveRegPair(reg1, reg2, spOffset);
// generate add SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
if (TargetOS::IsUnix && compiler->generateCFIUnwindCodes())
{
useSaveNextPair = false;
}
if (useSaveNextPair)
{
compiler->unwindSaveNext();
}
else
{
compiler->unwindSaveRegPair(reg1, reg2, spOffset);
}
}
}
//------------------------------------------------------------------------
// genEpilogRestoreReg: The opposite of genPrologSaveReg(), run in the epilog instead of the prolog.
//
// Arguments:
// reg1 - Register to restore.
// spOffset - The offset from SP to restore reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP after the register restores (must be positive or
// zero).
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
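//
// For example (an illustrative sketch): with spOffset=8 and spDelta=0 this emits
// "ldr x21, [sp, #8]"; with spOffset=0 and a small 16-byte-aligned spDelta the SP restore is
// folded in as "ldr x21, [sp], #spDelta".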
void CodeGen::genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta >= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
if (spDelta != 0)
{
if ((spOffset == 0) && (spDelta <= 255))
{
// We can use post-index addressing.
// ldr REG, [SP], #spDelta
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPreindexed(reg1, -spDelta);
}
else // (spOffset != 0) || (spDelta > 255)
{
// ldr reg1, [SP, #offset]
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
// generate add SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
else
{
// ldr reg1, [SP, #offset]
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
}
}
//------------------------------------------------------------------------
// genBuildRegPairsStack: Build a stack of register pairs for prolog/epilog save/restore for the given mask.
// The first register pair will contain the lowest register. Register pairs will combine neighbor
// registers in pairs. If that can't be done (for example, if we have a hole or this is the last reg in a mask with
// an odd number of regs) then the second element of that RegPair will be REG_NA.
//
// Arguments:
// regsMask - a mask of registers for prolog/epilog generation;
// regStack - a regStack instance to build the stack in, used to save temp copyings.
//
// Return value:
// no return value; the regStack argument is modified.
//
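// For example (an illustrative sketch): for the mask {x19, x20, x21, d8, d9} the stack is built as
// (x19, x20), (x21, REG_NA), (d8, d9); x21 stays unpaired because the next register in the mask,
// d8, is neither its successor register nor of the same class.
//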
// static
void CodeGen::genBuildRegPairsStack(regMaskTP regsMask, ArrayStack<RegPair>* regStack)
{
assert(regStack != nullptr);
assert(regStack->Height() == 0);
unsigned regsCount = genCountBits(regsMask);
while (regsMask != RBM_NONE)
{
regMaskTP reg1Mask = genFindLowestBit(regsMask);
regNumber reg1 = genRegNumFromMask(reg1Mask);
regsMask &= ~reg1Mask;
regsCount -= 1;
bool isPairSave = false;
if (regsCount > 0)
{
regMaskTP reg2Mask = genFindLowestBit(regsMask);
regNumber reg2 = genRegNumFromMask(reg2Mask);
if (reg2 == REG_NEXT(reg1))
{
// The JIT doesn't allow saving the pair (R28,FP), even though the
// save_regp register pair unwind code specification allows it.
// The JIT always saves (FP,LR) as a pair, and uses the save_fplr
// unwind code. This only comes up in stress mode scenarios
// where callee-saved registers are not allocated completely
// from lowest-to-highest, without gaps.
if (reg1 != REG_R28)
{
// Both registers must have the same type to be saved as pair.
if (genIsValidFloatReg(reg1) == genIsValidFloatReg(reg2))
{
isPairSave = true;
regsMask &= ~reg2Mask;
regsCount -= 1;
regStack->Push(RegPair(reg1, reg2));
}
}
}
}
if (!isPairSave)
{
regStack->Push(RegPair(reg1));
}
}
assert(regsCount == 0 && regsMask == RBM_NONE);
genSetUseSaveNextPairs(regStack);
}
//------------------------------------------------------------------------
// genSetUseSaveNextPairs: Set useSaveNextPair for each RegPair on the stack which unwind info can be encoded as
// save_next code.
//
// Arguments:
// regStack - a regStack instance to set useSaveNextPair.
//
// Notes:
// We can use save_next for RegPair(N, N+1) only when we have a sequence like (N-2, N-1), (N, N+1).
// In this case in the prolog save_next for (N, N+1) refers to save_pair(N-2, N-1);
// in the epilog the unwinder will search for the first save_pair (N-2, N-1)
// and then go back to the first save_next (N, N+1) to restore it first.
//
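// For example (an illustrative sketch): for the pair stack (x19, x20), (x21, x22), (x23, x24),
// both the second and the third pair get useSaveNextPair set, since each starts right after the
// registers of the pair saved immediately before it.
//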
// static
void CodeGen::genSetUseSaveNextPairs(ArrayStack<RegPair>* regStack)
{
for (int i = 1; i < regStack->Height(); ++i)
{
RegPair& curr = regStack->BottomRef(i);
RegPair prev = regStack->Bottom(i - 1);
if (prev.reg2 == REG_NA || curr.reg2 == REG_NA)
{
continue;
}
if (REG_NEXT(prev.reg2) != curr.reg1)
{
continue;
}
if (genIsValidFloatReg(prev.reg2) != genIsValidFloatReg(curr.reg1))
{
// It is possible to support a save_next chain from the last int pair to the first float pair,
// but it is a very rare case and it would require superfluous changes in the unwinder.
continue;
}
curr.useSaveNextPair = true;
}
}
//------------------------------------------------------------------------
// genGetSlotSizeForRegsInMask: Get the stack slot size appropriate for the register type from the mask.
//
// Arguments:
// regsMask - a mask of registers for prolog/epilog generation.
//
// Return value:
// stack slot size in bytes.
//
// Note: Because int and float register type sizes match we can call this function with a mask that includes both.
//
// static
int CodeGen::genGetSlotSizeForRegsInMask(regMaskTP regsMask)
{
assert((regsMask & (RBM_CALLEE_SAVED | RBM_FP | RBM_LR)) == regsMask); // Do not expect anything else.
static_assert_no_msg(REGSIZE_BYTES == FPSAVE_REGSIZE_BYTES);
return REGSIZE_BYTES;
}
//------------------------------------------------------------------------
// genSaveCalleeSavedRegisterGroup: Saves the group of registers described by the mask.
//
// Arguments:
// regsMask - a mask of registers for prolog generation;
// spDelta - if non-zero, the amount to add to SP before the first register save (or together with it);
// spOffset - the offset from SP that is the beginning of the callee-saved register area;
//
void CodeGen::genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset)
{
const int slotSize = genGetSlotSizeForRegsInMask(regsMask);
ArrayStack<RegPair> regStack(compiler->getAllocator(CMK_Codegen));
genBuildRegPairsStack(regsMask, ®Stack);
for (int i = 0; i < regStack.Height(); ++i)
{
RegPair regPair = regStack.Bottom(i);
if (regPair.reg2 != REG_NA)
{
// We can use a STP instruction.
genPrologSaveRegPair(regPair.reg1, regPair.reg2, spOffset, spDelta, regPair.useSaveNextPair, REG_IP0,
nullptr);
spOffset += 2 * slotSize;
}
else
{
// No register pair; we use a STR instruction.
genPrologSaveReg(regPair.reg1, spOffset, spDelta, REG_IP0, nullptr);
spOffset += slotSize;
}
spDelta = 0; // We've now changed SP already, if necessary; don't do it again.
}
}
//------------------------------------------------------------------------
// genSaveCalleeSavedRegistersHelp: Save the callee-saved registers in 'regsToSaveMask' to the stack frame
// in the function or funclet prolog. Registers are saved in register number order from low addresses
// to high addresses. This means that integer registers are saved at lower addresses than floating-point/SIMD
// registers. However, when genSaveFpLrWithAllCalleeSavedRegisters is true, the integer registers are stored
// at higher addresses than floating-point/SIMD registers, that is, the relative order of these two classes
// is reversed. This is done to put the saved frame pointer very high in the frame, for simplicity.
//
// TODO: We could always put integer registers at the higher addresses, if desired, to remove this special
// case. It would cause many asm diffs when first implemented.
//
// If establishing frame pointer chaining, it must be done after saving the callee-saved registers.
//
// We can only use the instructions that are allowed by the unwind codes. The caller ensures that
// there is enough space on the frame to store these registers, and that the store instructions
// we need to use (STR or STP) are encodable with the stack-pointer immediate offsets we need to use.
//
// The caller can tell us to fold in a stack pointer adjustment, which we will do with the first instruction.
// Note that the stack pointer adjustment must be by a multiple of 16 to preserve the invariant that the
// stack pointer is always 16 byte aligned. If we are saving an odd number of callee-saved
// registers, though, we will have an empty alignment slot somewhere. It turns out we will put
// it below (at a lower address) the callee-saved registers, as that is currently how we
// do frame layout. This means that the first stack offset will be 8 and the stack pointer
// adjustment must be done by a SUB, and not folded into a pre-indexed store.
//
// Arguments:
// regsToSaveMask - The mask of callee-saved registers to save. If empty, this function does nothing.
// lowestCalleeSavedOffset - The offset from SP that is the beginning of the callee-saved register area. Note that
// if non-zero spDelta, then this is the offset of the first save *after* that
// SP adjustment.
// spDelta - If non-zero, the amount to add to SP before the register saves (must be negative or
// zero).
//
// Notes:
// The save set can contain LR, in which case LR is saved along with the other callee-saved registers.
// However, the JIT currently doesn't use frames without a frame pointer on arm64.
//
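// An example save sequence (illustrative only; it mirrors the restore sequence shown for
// genRestoreCalleeSavedRegistersHelp()):
//      stp     x19, x20, [sp,#32]
//      stp     x21, x22, [sp,#48]
//      stp     x23, x24, [sp,#64]
//      stp     x25, x26, [sp,#80]
//      stp     x27, x28, [sp,#96]
//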
void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta)
{
assert(spDelta <= 0);
assert(-spDelta <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
unsigned regsToSaveCount = genCountBits(regsToSaveMask);
if (regsToSaveCount == 0)
{
if (spDelta != 0)
{
// Currently this is the case for varargs only
// whose size is MAX_REG_ARG * REGSIZE_BYTES = 64 bytes.
genStackPointerAdjustment(spDelta, REG_NA, nullptr, /* reportUnwindData */ true);
}
return;
}
assert((spDelta % 16) == 0);
// We also can save FP and LR, even though they are not in RBM_CALLEE_SAVED.
assert(regsToSaveCount <= genCountBits(RBM_CALLEE_SAVED | RBM_FP | RBM_LR));
// Save integer registers at higher addresses than floating-point registers.
regMaskTP maskSaveRegsFloat = regsToSaveMask & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = regsToSaveMask & ~maskSaveRegsFloat;
if (maskSaveRegsFloat != RBM_NONE)
{
genSaveCalleeSavedRegisterGroup(maskSaveRegsFloat, spDelta, lowestCalleeSavedOffset);
spDelta = 0;
lowestCalleeSavedOffset += genCountBits(maskSaveRegsFloat) * FPSAVE_REGSIZE_BYTES;
}
if (maskSaveRegsInt != RBM_NONE)
{
genSaveCalleeSavedRegisterGroup(maskSaveRegsInt, spDelta, lowestCalleeSavedOffset);
// No need to update spDelta, lowestCalleeSavedOffset since they're not used after this.
}
}
//------------------------------------------------------------------------
// genRestoreCalleeSavedRegisterGroup: Restores the group of registers described by the mask.
//
// Arguments:
// regsMask - a mask of registers for epilog generation;
// spDelta - if non-zero, the amount to add to SP after the last register restore (or together with it);
// spOffset - the offset from SP that is the beginning of the callee-saved register area;
//
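// Note that the pairs are visited from the top of the regStack, i.e. in the reverse of the save
// order; illustratively, a group saved as "stp x19,x20; stp x21,x22" is restored as
// "ldp x21,x22; ldp x19,x20".
//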
void CodeGen::genRestoreCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset)
{
const int slotSize = genGetSlotSizeForRegsInMask(regsMask);
ArrayStack<RegPair> regStack(compiler->getAllocator(CMK_Codegen));
genBuildRegPairsStack(regsMask, ®Stack);
int stackDelta = 0;
for (int i = 0; i < regStack.Height(); ++i)
{
bool lastRestoreInTheGroup = (i == regStack.Height() - 1);
bool updateStackDelta = lastRestoreInTheGroup && (spDelta != 0);
if (updateStackDelta)
{
// Update stack delta only if it is the last restore (the first save).
assert(stackDelta == 0);
stackDelta = spDelta;
}
RegPair regPair = regStack.Top(i);
if (regPair.reg2 != REG_NA)
{
spOffset -= 2 * slotSize;
genEpilogRestoreRegPair(regPair.reg1, regPair.reg2, spOffset, stackDelta, regPair.useSaveNextPair, REG_IP1,
nullptr);
}
else
{
spOffset -= slotSize;
genEpilogRestoreReg(regPair.reg1, spOffset, stackDelta, REG_IP1, nullptr);
}
}
}
//------------------------------------------------------------------------
// genRestoreCalleeSavedRegistersHelp: Restore the callee-saved registers in 'regsToRestoreMask' from the stack frame
// in the function or funclet epilog. This exactly reverses the actions of genSaveCalleeSavedRegistersHelp().
//
// Arguments:
// regsToRestoreMask - The mask of callee-saved registers to restore. If empty, this function does nothing.
// lowestCalleeSavedOffset - The offset from SP that is the beginning of the callee-saved register area.
// spDelta - If non-zero, the amount to add to SP after the register restores (must be positive or
// zero).
//
// Here's an example restore sequence:
// ldp x27, x28, [sp,#96]
// ldp x25, x26, [sp,#80]
// ldp x23, x24, [sp,#64]
// ldp x21, x22, [sp,#48]
// ldp x19, x20, [sp,#32]
//
// For the case of non-zero spDelta, we assume the base of the callee-save registers to restore is at SP, and
// the last restore adjusts SP by the specified amount. For example:
// ldp x27, x28, [sp,#64]
// ldp x25, x26, [sp,#48]
// ldp x23, x24, [sp,#32]
// ldp x21, x22, [sp,#16]
// ldp x19, x20, [sp], #80
//
// Note that you call the unwind functions specifying the prolog operation that is being undone. So, for example, when
// generating a post-indexed load, you call the unwind function for the corresponding pre-indexed store.
//
// Return Value:
// None.
void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta)
{
assert(spDelta >= 0);
unsigned regsToRestoreCount = genCountBits(regsToRestoreMask);
if (regsToRestoreCount == 0)
{
if (spDelta != 0)
{
// Currently this is the case for varargs only
// whose size is MAX_REG_ARG * REGSIZE_BYTES = 64 bytes.
genStackPointerAdjustment(spDelta, REG_NA, nullptr, /* reportUnwindData */ true);
}
return;
}
assert((spDelta % 16) == 0);
// We also can restore FP and LR, even though they are not in RBM_CALLEE_SAVED.
assert(regsToRestoreCount <= genCountBits(RBM_CALLEE_SAVED | RBM_FP | RBM_LR));
// Point past the end, to start. We predecrement to find the offset to load from.
static_assert_no_msg(REGSIZE_BYTES == FPSAVE_REGSIZE_BYTES);
int spOffset = lowestCalleeSavedOffset + regsToRestoreCount * REGSIZE_BYTES;
// Save integer registers at higher addresses than floating-point registers.
regMaskTP maskRestoreRegsFloat = regsToRestoreMask & RBM_ALLFLOAT;
regMaskTP maskRestoreRegsInt = regsToRestoreMask & ~maskRestoreRegsFloat;
// Restore in the opposite order of saving.
if (maskRestoreRegsInt != RBM_NONE)
{
int spIntDelta = (maskRestoreRegsFloat != RBM_NONE) ? 0 : spDelta; // should we delay the SP adjustment?
genRestoreCalleeSavedRegisterGroup(maskRestoreRegsInt, spIntDelta, spOffset);
spOffset -= genCountBits(maskRestoreRegsInt) * REGSIZE_BYTES;
}
if (maskRestoreRegsFloat != RBM_NONE)
{
// If there is any spDelta, it must be used here.
genRestoreCalleeSavedRegisterGroup(maskRestoreRegsFloat, spDelta, spOffset);
// No need to update spOffset since it's not used after this.
}
}
// clang-format off
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch: x0 = the exception object that was caught (see GT_CATCH_ARG)
* filter: x0 = the exception object to filter (see GT_CATCH_ARG), x1 = CallerSP of the containing function
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch: x0 = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: x0 = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The ARM64 funclet prolog sequence is one of the following (Note: #framesz is total funclet frame size,
* including everything; #outsz is outgoing argument space. #framesz must be a multiple of 16):
*
* Frame type 1:
* For #outsz == 0 and #framesz <= 512:
* stp fp,lr,[sp,-#framesz]! ; establish the frame (predecrement by #framesz), save FP/LR
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned.
* |-----------------------|
* | Saved FP, LR | // 16 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Frame type 2:
* For #outsz != 0 and #framesz <= 512:
* sub sp,sp,#framesz ; establish the frame
* stp fp,lr,[sp,#outsz] ; save FP/LR.
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned.
* |-----------------------|
* | Saved FP, LR | // 16 bytes
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Frame type 3:
* For #framesz > 512:
* stp fp,lr,[sp,- (#framesz - #outsz)]! ; establish the frame, save FP/LR
* ; note that it is guaranteed here that (#framesz - #outsz) <= 240
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
* sub sp,sp,#outsz ; create space for outgoing argument space
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the first SP subtraction 16 byte aligned
* |-----------------------|
* | Saved FP, LR | // 16 bytes
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned (specifically, to 16-byte align the outgoing argument space).
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Both #1 and #2 only change SP once. That means that there will be a maximum of one alignment slot needed. For the general case, #3,
* it is possible that we will need to add alignment to both changes to SP, leading to 16 bytes of alignment. Remember that the stack
* pointer needs to be 16 byte aligned at all times. The size of the PSP slot plus callee-saved registers space is a maximum of 240 bytes:
*
* FP,LR registers
* 10 int callee-saved register x19-x28
* 8 float callee-saved registers v8-v15
* 8 saved integer argument registers x0-x7, if varargs function
* 1 PSP slot
* 1 alignment slot
* == 30 slots * 8 bytes = 240 bytes.
*
* The outgoing argument size, however, can be very large, if we call a function that takes a large number of
* arguments (note that we currently use the same outgoing argument space size in the funclet as for the main
* function, even if the funclet doesn't have any calls, or has a much smaller, or larger, maximum number of
* outgoing arguments for any call). In that case, we need to 16-byte align the initial change to SP, before
* saving off the callee-saved registers and establishing the PSPsym, so we can use the limited immediate offset
* encodings we have available, before doing another 16-byte aligned SP adjustment to create the outgoing argument
* space. Both changes to SP might need to add alignment padding.
*
* In addition to the above "standard" frames, we also need to support a frame where the saved FP/LR are at the
* highest addresses. This is to match the frame layout (specifically, callee-saved registers including FP/LR
* and the PSPSym) that is used in the main function when a GS cookie is required due to the use of localloc.
* (Note that localloc cannot be used in a funclet.) In these variants, not only has the position of FP/LR
* changed, but where the alignment padding is placed has also changed.
*
* Frame type 4 (variant of frame types 1 and 2):
* For #framesz <= 512:
* sub sp,sp,#framesz ; establish the frame
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
* stp fp,lr,[sp,#yyy] ; save FP/LR.
* ; write PSPSym
*
* The "#framesz <= 512" condition ensures that after we've established the frame, we can use "stp" with its
* maximum allowed offset (504) to save the callee-saved register at the highest address.
*
* We use "sub" instead of folding it into the next instruction as a predecrement, as we need to write PSPSym
* at the bottom of the stack, and there might also be an alignment padding slot.
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* | Saved LR | // 8 bytes
* |-----------------------|
* | Saved FP | // 8 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned.
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes (optional; if #outsz > 0)
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Frame type 5 (variant of frame type 3):
* For #framesz > 512:
* sub sp,sp,(#framesz - #outsz) ; establish part of the frame. Note that it is guaranteed here that (#framesz - #outsz) <= 240
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
* stp fp,lr,[sp,#yyy] ; save FP/LR.
* sub sp,sp,#outsz ; create space for outgoing argument space
* ; write PSPSym
*
* For large frames with "#framesz > 512", we must do one SP adjustment first, after which we can save callee-saved
* registers with up to the maximum "stp" offset of 504. Then, we can establish the rest of the frame (namely, the
* space for the outgoing argument space).
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* | Saved LR | // 8 bytes
* |-----------------------|
* | Saved FP | // 8 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the first SP subtraction 16 byte aligned
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned (specifically, to 16-byte align the outgoing argument space).
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Note that in this case we might have 16 bytes of alignment that is adjacent. This is because we are doing 2 SP
* subtractions, and each one must be aligned up to 16 bytes.
*
* Note that in all cases, the PSPSym is in exactly the same position with respect to Caller-SP, and that location is the same relative to Caller-SP
* as in the main function.
*
* Funclets do not have varargs arguments. However, because the PSPSym must exist at the same offset from Caller-SP as in the main function, we
* must add buffer space for the saved varargs argument registers here, if the main function did the same.
*
* ; After this header, fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet epilog.
*
* if (this is a filter funclet)
* {
* // x1 on entry to a filter funclet is CallerSP of the containing function:
* // either the main function, or the funclet for a handler that this filter is dynamically nested within.
* // Note that a filter can be dynamically nested within a funclet even if it is not statically within
* // a funclet. Consider:
* //
* // try {
* // try {
* // throw new Exception();
* // } catch(Exception) {
* // throw new Exception(); // The exception thrown here ...
* // }
* // } filter { // ... will be processed here, while the "catch" funclet frame is still on the stack
* // } filter-handler {
* // }
* //
* // Because of this, we need a PSP in the main function anytime a filter funclet doesn't know whether the enclosing frame will
* // be a funclet or main function. We won't know any time there is a filter protecting nested EH. To simplify, we just always
* // create a main function PSP for any function with a filter.
*
* ldr x1, [x1, #CallerSP_to_PSP_slot_delta] ; Load the CallerSP of the main function (stored in the PSP of the dynamically containing funclet or function)
* str x1, [sp, #SP_to_PSP_slot_delta] ; store the PSP
* add fp, x1, #Function_CallerSP_to_FP_delta ; re-establish the frame pointer
* }
* else
* {
* // This is NOT a filter funclet. The VM re-establishes the frame pointer on entry.
* // TODO-ARM64-CQ: if VM set x1 to CallerSP on entry, like for filters, we could save an instruction.
*
* add x3, fp, #Function_FP_to_CallerSP_delta ; compute the CallerSP, given the frame pointer. x3 is scratch.
* str x3, [sp, #SP_to_PSP_slot_delta] ; store the PSP
* }
*
* An example epilog sequence is then:
*
* add sp,sp,#outsz ; if any outgoing argument space
* ... ; restore callee-saved registers
* ldp x19,x20,[sp,#xxx]
* ldp fp,lr,[sp],#framesz
* ret lr
*
*/
// clang-format on
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletProlog()\n");
#endif
assert(block != NULL);
assert(block->bbFlags & BBF_FUNCLET_BEG);
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
regMaskTP maskSaveRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = genFuncletInfo.fiSaveRegs & ~maskSaveRegsFloat;
// Funclets must always save LR and FP, since when we have funclets we must have an FP frame.
assert((maskSaveRegsInt & RBM_LR) != 0);
assert((maskSaveRegsInt & RBM_FP) != 0);
bool isFilter = (block->bbCatchTyp == BBCT_FILTER);
regMaskTP maskArgRegsLiveIn;
if (isFilter)
{
maskArgRegsLiveIn = RBM_R0 | RBM_R1;
}
else if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_NONE;
}
else
{
maskArgRegsLiveIn = RBM_R0;
}
if (genFuncletInfo.fiFrameType == 1)
{
// With OSR we may see large values for fiSpDelta1
// (we really need to probe the frame, sigh)
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
maskSaveRegsInt &= ~(RBM_LR | RBM_FP); // We've saved these now
assert(genFuncletInfo.fiSpDelta2 == 0);
assert(genFuncletInfo.fiSP_to_FPLR_save_delta == 0);
}
else if (genFuncletInfo.fiFrameType == 2)
{
// fiFrameType==2 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate sub SP,SP,imm
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
maskSaveRegsInt &= ~(RBM_LR | RBM_FP); // We've saved these now
}
else if (genFuncletInfo.fiFrameType == 3)
{
// With OSR we may see large values for fiSpDelta1
// (we really need to probe the frame, sigh)
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
maskSaveRegsInt &= ~(RBM_LR | RBM_FP); // We've saved these now
}
else if (genFuncletInfo.fiFrameType == 4)
{
// fiFrameType==4 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate sub SP,SP,imm
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
}
else
{
assert(genFuncletInfo.fiFrameType == 5);
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
// Nothing to do here; the first SP adjustment will be done by saving the callee-saved registers.
}
}
int lowestCalleeSavedOffset = genFuncletInfo.fiSP_to_CalleeSave_delta +
genFuncletInfo.fiSpDelta2; // We haven't done the second adjustment of SP yet (if any)
genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, lowestCalleeSavedOffset, 0);
if ((genFuncletInfo.fiFrameType == 3) || (genFuncletInfo.fiFrameType == 5))
{
// Note that genFuncletInfo.fiSpDelta2 is always a non-positive value
assert(genFuncletInfo.fiSpDelta2 <= 0);
// generate sub SP,SP,imm
if (genFuncletInfo.fiSpDelta2 < 0)
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta2, REG_R2, nullptr, /* reportUnwindData */ true);
}
else
{
// we will only see fiSpDelta2 == 0 for OSR funclets
assert(compiler->opts.IsOSR());
}
}
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done. Otherwise, we need to set up the PSPSym in the funclet frame.
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
if (isFilter)
{
// This is the first block of a filter
// Note that register x1 = CallerSP of the containing function
// X1 is overwritten by the first Load (new callerSP)
// X2 is scratch when we have a large constant offset
// Load the CallerSP of the main function (stored in the PSP of the dynamically containing funclet or
// function)
genInstrWithConstant(INS_ldr, EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiCallerSP_to_PSP_slot_delta,
REG_R2, false);
regSet.verifyRegUsed(REG_R1);
// Store the PSP value (aka CallerSP)
genInstrWithConstant(INS_str, EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2,
false);
// re-establish the frame pointer
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_FPBASE, REG_R1,
genFuncletInfo.fiFunction_CallerSP_to_FP_delta, REG_R2, false);
}
else // This is a non-filter funclet
{
// X3 is scratch, X2 can also become scratch
// compute the CallerSP, given the frame pointer. x3 is scratch.
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE,
-genFuncletInfo.fiFunction_CallerSP_to_FP_delta, REG_R2, false);
regSet.verifyRegUsed(REG_R3);
genInstrWithConstant(INS_str, EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2,
false);
}
}
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletEpilog()\n");
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
bool unwindStarted = false;
if (!unwindStarted)
{
// We can delay this until we know we'll generate an unwindable instruction, if necessary.
compiler->unwindBegEpilog();
unwindStarted = true;
}
regMaskTP maskRestoreRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskRestoreRegsInt = genFuncletInfo.fiSaveRegs & ~maskRestoreRegsFloat;
// Funclets must always save LR and FP, since when we have funclets we must have an FP frame.
assert((maskRestoreRegsInt & RBM_LR) != 0);
assert((maskRestoreRegsInt & RBM_FP) != 0);
if ((genFuncletInfo.fiFrameType == 3) || (genFuncletInfo.fiFrameType == 5))
{
// Note that genFuncletInfo.fiSpDelta2 is always a non-positive value
assert(genFuncletInfo.fiSpDelta2 <= 0);
// generate add SP,SP,imm
if (genFuncletInfo.fiSpDelta2 < 0)
{
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta2, REG_R2, nullptr, /* reportUnwindData */ true);
}
else
{
// we should only see a zero fiSpDelta2 with OSR.
assert(compiler->opts.IsOSR());
}
}
regMaskTP regsToRestoreMask = maskRestoreRegsInt | maskRestoreRegsFloat;
if ((genFuncletInfo.fiFrameType == 1) || (genFuncletInfo.fiFrameType == 2) || (genFuncletInfo.fiFrameType == 3))
{
regsToRestoreMask &= ~(RBM_LR | RBM_FP); // We restore FP/LR at the end
}
int lowestCalleeSavedOffset = genFuncletInfo.fiSP_to_CalleeSave_delta + genFuncletInfo.fiSpDelta2;
genRestoreCalleeSavedRegistersHelp(regsToRestoreMask, lowestCalleeSavedOffset, 0);
if (genFuncletInfo.fiFrameType == 1)
{
// With OSR we may see large values for fiSpDelta1
//
if (compiler->opts.IsOSR())
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
assert(genFuncletInfo.fiSpDelta2 == 0);
assert(genFuncletInfo.fiSP_to_FPLR_save_delta == 0);
}
else if (genFuncletInfo.fiFrameType == 2)
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
// fiFrameType==2 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate add SP,SP,imm
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
}
else if (genFuncletInfo.fiFrameType == 3)
{
// With OSR we may see large values for fiSpDelta1
//
if (compiler->opts.IsOSR())
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
}
else if (genFuncletInfo.fiFrameType == 4)
{
// fiFrameType==4 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate add SP,SP,imm
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
}
else
{
assert(genFuncletInfo.fiFrameType == 5);
// Same work as fiFrameType==4, but different asserts.
assert(genFuncletInfo.fiSpDelta1 < 0);
// With OSR we may see large values for fiSpDelta1 as the funclet
// frame currently must pad with the Tier0 frame size.
//
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
// generate add SP,SP,imm
assert(genFuncletInfo.fiSpDelta1 >= -240);
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
}
}
inst_RV(INS_ret, REG_LR, TYP_I_IMPL);
compiler->unwindReturn(REG_LR);
compiler->unwindEndEpilog();
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
* Note that all funclet prologs are identical, and all funclet epilogs are
* identical (per type: filters are identical, and non-filters are identical).
* Thus, we compute the data used for these just once.
*
* See genFuncletProlog() for more information about the prolog/epilog sequences.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
return;
assert(isFramePointerUsed());
// The frame size and offsets must be finalized
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT);
unsigned const PSPSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? REGSIZE_BYTES : 0;
// Because a method and funclets must have the same caller-relative PSPSym offset,
// if there is a PSPSym, we have to pad the funclet frame size for OSR.
//
unsigned osrPad = 0;
if (compiler->opts.IsOSR() && (PSPSize > 0))
{
osrPad = compiler->info.compPatchpointInfo->TotalFrameSize();
}
genFuncletInfo.fiFunction_CallerSP_to_FP_delta = genCallerSPtoFPdelta() - osrPad;
regMaskTP rsMaskSaveRegs = regSet.rsMaskCalleeSaved;
assert((rsMaskSaveRegs & RBM_LR) != 0);
assert((rsMaskSaveRegs & RBM_FP) != 0);
unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
unsigned saveRegsPlusPSPSize = saveRegsCount * REGSIZE_BYTES + PSPSize;
if (compiler->info.compIsVarArgs)
{
// For varargs we always save all of the integer register arguments
// so that they are contiguous with the incoming stack arguments.
saveRegsPlusPSPSize += MAX_REG_ARG * REGSIZE_BYTES;
}
unsigned const saveRegsPlusPSPSizeAligned = roundUp(saveRegsPlusPSPSize, STACK_ALIGN);
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
unsigned const outgoingArgSpaceAligned = roundUp(compiler->lvaOutgoingArgSpaceSize, STACK_ALIGN);
unsigned const maxFuncletFrameSizeAligned = saveRegsPlusPSPSizeAligned + osrPad + outgoingArgSpaceAligned;
assert((maxFuncletFrameSizeAligned % STACK_ALIGN) == 0);
int SP_to_FPLR_save_delta;
int SP_to_PSP_slot_delta;
int CallerSP_to_PSP_slot_delta;
unsigned const funcletFrameSize = saveRegsPlusPSPSize + osrPad + compiler->lvaOutgoingArgSpaceSize;
unsigned const funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN);
assert(funcletFrameSizeAligned <= maxFuncletFrameSizeAligned);
unsigned const funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
assert((funcletFrameAlignmentPad == 0) || (funcletFrameAlignmentPad == REGSIZE_BYTES));
if (maxFuncletFrameSizeAligned <= 512)
{
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
SP_to_FPLR_save_delta = funcletFrameSizeAligned - (2 /* FP, LR */ * REGSIZE_BYTES);
if (compiler->info.compIsVarArgs)
{
SP_to_FPLR_save_delta -= MAX_REG_ARG * REGSIZE_BYTES;
}
SP_to_PSP_slot_delta = compiler->lvaOutgoingArgSpaceSize + funcletFrameAlignmentPad + osrPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSize);
genFuncletInfo.fiFrameType = 4;
}
else
{
SP_to_FPLR_save_delta = compiler->lvaOutgoingArgSpaceSize;
SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + funcletFrameAlignmentPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSize - 2 /* FP, LR */ * REGSIZE_BYTES);
if (compiler->lvaOutgoingArgSpaceSize == 0)
{
genFuncletInfo.fiFrameType = 1;
}
else
{
genFuncletInfo.fiFrameType = 2;
}
}
genFuncletInfo.fiSpDelta1 = -(int)funcletFrameSizeAligned;
genFuncletInfo.fiSpDelta2 = 0;
assert(genFuncletInfo.fiSpDelta1 + genFuncletInfo.fiSpDelta2 == -(int)funcletFrameSizeAligned);
}
else
{
unsigned saveRegsPlusPSPAlignmentPad = saveRegsPlusPSPSizeAligned - saveRegsPlusPSPSize;
assert((saveRegsPlusPSPAlignmentPad == 0) || (saveRegsPlusPSPAlignmentPad == REGSIZE_BYTES));
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
SP_to_FPLR_save_delta = funcletFrameSizeAligned - (2 /* FP, LR */ * REGSIZE_BYTES);
if (compiler->info.compIsVarArgs)
{
SP_to_FPLR_save_delta -= MAX_REG_ARG * REGSIZE_BYTES;
}
SP_to_PSP_slot_delta =
compiler->lvaOutgoingArgSpaceSize + funcletFrameAlignmentPad + saveRegsPlusPSPAlignmentPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSize);
genFuncletInfo.fiFrameType = 5;
}
else
{
SP_to_FPLR_save_delta = outgoingArgSpaceAligned;
SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + saveRegsPlusPSPAlignmentPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSizeAligned - 2 /* FP, LR */ * REGSIZE_BYTES -
saveRegsPlusPSPAlignmentPad);
genFuncletInfo.fiFrameType = 3;
}
genFuncletInfo.fiSpDelta1 = -(int)(osrPad + saveRegsPlusPSPSizeAligned);
genFuncletInfo.fiSpDelta2 = -(int)outgoingArgSpaceAligned;
assert(genFuncletInfo.fiSpDelta1 + genFuncletInfo.fiSpDelta2 == -(int)maxFuncletFrameSizeAligned);
}
/* Now save it for future use */
genFuncletInfo.fiSaveRegs = rsMaskSaveRegs;
genFuncletInfo.fiSP_to_FPLR_save_delta = SP_to_FPLR_save_delta;
genFuncletInfo.fiSP_to_PSP_slot_delta = SP_to_PSP_slot_delta;
genFuncletInfo.fiSP_to_CalleeSave_delta = SP_to_PSP_slot_delta + PSPSize;
genFuncletInfo.fiCallerSP_to_PSP_slot_delta = CallerSP_to_PSP_slot_delta;
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Save regs: ");
dspRegMask(genFuncletInfo.fiSaveRegs);
printf("\n");
if (compiler->opts.IsOSR())
{
printf(" OSR Pad: %d\n", osrPad);
}
printf(" SP to FP/LR save location delta: %d\n", genFuncletInfo.fiSP_to_FPLR_save_delta);
printf(" SP to PSP slot delta: %d\n", genFuncletInfo.fiSP_to_PSP_slot_delta);
printf(" SP to callee-saved area delta: %d\n", genFuncletInfo.fiSP_to_CalleeSave_delta);
printf(" Caller SP to PSP slot delta: %d\n", genFuncletInfo.fiCallerSP_to_PSP_slot_delta);
printf(" Frame type: %d\n", genFuncletInfo.fiFrameType);
printf(" SP delta 1: %d\n", genFuncletInfo.fiSpDelta1);
printf(" SP delta 2: %d\n", genFuncletInfo.fiSpDelta2);
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
if (CallerSP_to_PSP_slot_delta !=
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging
{
printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n",
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
}
}
}
assert(genFuncletInfo.fiSP_to_FPLR_save_delta >= 0);
assert(genFuncletInfo.fiSP_to_PSP_slot_delta >= 0);
assert(genFuncletInfo.fiSP_to_CalleeSave_delta >= 0);
assert(genFuncletInfo.fiCallerSP_to_PSP_slot_delta <= 0);
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(genFuncletInfo.fiCallerSP_to_PSP_slot_delta ==
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
// funclet!
}
#endif // DEBUG
}
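//------------------------------------------------------------------------
// genSetPSPSym: Store the PSP value (the caller's SP) into the PSPSym stack slot in the main
// function prolog, so that funclets and nested filters can recover it later (see the funclet
// prolog comments above). This is a no-op when there is no PSPSym (CoreRT ABI).
//
// Arguments:
//    initReg        - an available scratch register; its contents are clobbered here.
//    pInitRegZeroed - OUT parameter; set to 'false' because initReg is overwritten.
//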
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
int SPtoCallerSPdelta = -genCallerSPtoInitialSPdelta();
if (compiler->opts.IsOSR())
{
SPtoCallerSPdelta += compiler->info.compPatchpointInfo->TotalFrameSize();
}
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
regNumber regTmp = initReg;
*pInitRegZeroed = false;
GetEmitter()->emitIns_R_R_Imm(INS_add, EA_PTRSIZE, regTmp, REG_SPBASE, SPtoCallerSPdelta);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
}
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
int bytesToWrite = untrLclHi - untrLclLo;
const regNumber zeroSimdReg = REG_ZERO_INIT_FRAME_SIMD;
bool simdRegZeroed = false;
const int simdRegPairSizeBytes = 2 * FP_REGSIZE_BYTES;
regNumber addrReg = REG_ZERO_INIT_FRAME_REG1;
if (addrReg == initReg)
{
*pInitRegZeroed = false;
}
int addrOffset = 0;
// The following invariants are held below:
//
// 1) [addrReg, #addrOffset] points at a location where next chunk of zero bytes will be written;
// 2) bytesToWrite specifies the number of bytes on the frame to initialize;
// 3) if simdRegZeroed is true then 128-bit wide zeroSimdReg contains zeroes.
const int bytesUseZeroingLoop = 192;
if (bytesToWrite >= bytesUseZeroingLoop)
{
// Generates the following code:
//
// When the size of the region is greater than or equal to 256 bytes
// **and** DC ZVA instruction use is permitted
// **and** the instruction block size is configured to 64 bytes:
//
// movi v16.16b, #0
// add x9, fp, #(untrLclLo+64)
// add x10, fp, #(untrLclHi-64)
// stp q16, q16, [x9, #-64]
// stp q16, q16, [x9, #-32]
// bfm x9, xzr, #0, #5
//
// loop:
// dc zva, x9
// add x9, x9, #64
// cmp x9, x10
// blo loop
//
// stp q16, q16, [x10]
// stp q16, q16, [x10, #32]
//
// Otherwise:
//
// movi v16.16b, #0
// add x9, fp, #(untrLclLo-32)
// mov x10, #(bytesToWrite-64)
//
// loop:
// stp q16, q16, [x9, #32]
// stp q16, q16, [x9, #64]!
// subs x10, x10, #64
// bge loop
const int bytesUseDataCacheZeroInstruction = 256;
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, zeroSimdReg, 0, INS_OPTS_16B);
simdRegZeroed = true;
if ((bytesToWrite >= bytesUseDataCacheZeroInstruction) &&
compiler->compOpportunisticallyDependsOn(InstructionSet_Dczva))
{
// The first and the last 64 bytes should be written with two stp q-reg instructions.
// This is in order to avoid **unintended** zeroing of the data by dc zva
// outside of [fp+untrLclLo, fp+untrLclHi) memory region.
genInstrWithConstant(INS_add, EA_PTRSIZE, addrReg, genFramePointerReg(), untrLclLo + 64, addrReg);
addrOffset = -64;
const regNumber endAddrReg = REG_ZERO_INIT_FRAME_REG2;
if (endAddrReg == initReg)
{
*pInitRegZeroed = false;
}
genInstrWithConstant(INS_add, EA_PTRSIZE, endAddrReg, genFramePointerReg(), untrLclHi - 64, endAddrReg);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, addrOffset);
addrOffset += simdRegPairSizeBytes;
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, addrOffset);
addrOffset += simdRegPairSizeBytes;
assert(addrOffset == 0);
GetEmitter()->emitIns_R_R_I_I(INS_bfm, EA_PTRSIZE, addrReg, REG_ZR, 0, 5);
// addrReg points at the beginning of a cache line.
GetEmitter()->emitIns_R(INS_dczva, EA_PTRSIZE, addrReg);
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, addrReg, addrReg, 64);
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, addrReg, endAddrReg);
GetEmitter()->emitIns_J(INS_blo, NULL, -4);
addrReg = endAddrReg;
bytesToWrite = 64;
}
else
{
genInstrWithConstant(INS_add, EA_PTRSIZE, addrReg, genFramePointerReg(), untrLclLo - 32, addrReg);
addrOffset = 32;
const regNumber countReg = REG_ZERO_INIT_FRAME_REG2;
if (countReg == initReg)
{
*pInitRegZeroed = false;
}
instGen_Set_Reg_To_Imm(EA_PTRSIZE, countReg, bytesToWrite - 64);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, 32);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, 64,
INS_OPTS_PRE_INDEX);
GetEmitter()->emitIns_R_R_I(INS_subs, EA_PTRSIZE, countReg, countReg, 64);
GetEmitter()->emitIns_J(INS_bge, NULL, -4);
bytesToWrite %= 64;
}
}
else
{
genInstrWithConstant(INS_add, EA_PTRSIZE, addrReg, genFramePointerReg(), untrLclLo, addrReg);
}
if (bytesToWrite >= simdRegPairSizeBytes)
{
// Generates the following code:
//
// movi v16.16b, #0
// stp q16, q16, [x9, #addrOffset]
// stp q16, q16, [x9, #(addrOffset+32)]
// ...
// stp q16, q16, [x9, #(addrOffset+roundDown(bytesToWrite, 32))]
if (!simdRegZeroed)
{
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, zeroSimdReg, 0, INS_OPTS_16B);
simdRegZeroed = true;
}
for (; bytesToWrite >= simdRegPairSizeBytes; bytesToWrite -= simdRegPairSizeBytes)
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, addrOffset);
addrOffset += simdRegPairSizeBytes;
}
}
const int regPairSizeBytes = 2 * REGSIZE_BYTES;
if (bytesToWrite >= regPairSizeBytes)
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, addrReg, addrOffset);
addrOffset += regPairSizeBytes;
bytesToWrite -= regPairSizeBytes;
}
if (bytesToWrite >= REGSIZE_BYTES)
{
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, addrReg, addrOffset);
addrOffset += REGSIZE_BYTES;
bytesToWrite -= REGSIZE_BYTES;
}
if (bytesToWrite == sizeof(int))
{
GetEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, REG_ZR, addrReg, addrOffset);
bytesToWrite = 0;
}
assert(bytesToWrite == 0);
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX End Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
// Generate a call to the finally, like this:
// mov x0,qword ptr [fp + 10H] / sp // Load x0 with PSPSym, or sp if PSPSym is not used
// bl finally-funclet
// b finally-return // Only for non-retless finally calls
// The 'b' can be a NOP if we're going to the next block.
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, REG_R0, compiler->lvaPSPSym, 0);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_R0, REG_SPBASE, /* canSkip */ false);
}
GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// We have a retless call, and the last instruction generated was a call.
// If the next block is in a different EH region (or is the end of the code
// block), then we need to generate a breakpoint here (since it will never
// get executed) to get proper unwind behavior.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
else
{
// Because of the way the flowgraph is connected, the liveness info for this one instruction
// after the call is not (cannot be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
GetEmitter()->emitDisableGC();
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
{
// Fall-through.
// TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly
// to the next instruction? This would depend on stack walking from within the finally
// handler working without this instruction being in this special EH region.
instGen(INS_nop);
}
else
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
GetEmitter()->emitEnableGC();
}
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
block = block->bbNext;
}
return block;
}
void CodeGen::genEHCatchRet(BasicBlock* block)
{
// For long address (default): `adrp + add` will be emitted.
// For short address (proven later): `adr` will be emitted.
GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
}
// move an immediate value into an integer register
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
if (!compiler->opts.compReloc)
{
size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if (EA_IS_RELOC(size))
{
// This emits a pair of adrp/add (two instructions) with fix-ups.
GetEmitter()->emitIns_R_AI(INS_adrp, size, reg, imm DEBUGARG(targetHandle) DEBUGARG(gtFlags));
}
else if (imm == 0)
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
}
else
{
// Arm64 can load an arbitrary 16-bit constant into any halfword of a register.
// There are three forms:
//    movk, which loads into the chosen halfword, preserving the remaining halfwords
//    movz, which loads into the chosen halfword, zeroing the remaining halfwords
//    movn, which loads into the chosen halfword, zeroing the remaining halfwords, and then bitwise-inverts the register
// In some cases it is preferable to use movn, because it has the side effect of filling the other halfwords
// with ones.
// Determine whether movn or movz will require the fewest instructions to populate the immediate.
int preferMovn = 0;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (uint16_t(imm >> i) == 0xffff)
++preferMovn; // a single movk 0xffff could be skipped if movn was used
else if (uint16_t(imm >> i) == 0x0000)
--preferMovn; // a single movk 0 could be skipped if movz was used
}
// Select the first instruction. Any additional instruction will use movk
instruction ins = (preferMovn > 0) ? INS_movn : INS_movz;
// Initial movz or movn will fill the remaining bytes with the skipVal
// This can allow skipping filling a halfword
uint16_t skipVal = (preferMovn > 0) ? 0xffff : 0;
unsigned bits = (size == EA_8BYTE) ? 64 : 32;
// Iterate over imm examining 16 bits at a time
for (unsigned i = 0; i < bits; i += 16)
{
uint16_t imm16 = uint16_t(imm >> i);
if (imm16 != skipVal)
{
if (ins == INS_movn)
{
// For the movn case, we need to bitwise invert the immediate. This is because
// (movn x0, ~imm16) === (movz x0, imm16; or x0, x0, #0xffff`ffff`ffff`0000)
imm16 = ~imm16;
}
GetEmitter()->emitIns_R_I_I(ins, size, reg, imm16, i, INS_OPTS_LSL);
// Once the initial movz/movn is emitted the remaining instructions will all use movk
ins = INS_movk;
}
}
// We must emit a movn or movz or we have not done anything
// The cases which hit this assert should be (emitIns_valid_imm_for_mov() == true) and
// should not be in this else condition
assert(ins == INS_movk);
}
// The caller may have requested that the flags be set on this mov (rarely/never)
if (flags == INS_FLAGS_SET)
{
GetEmitter()->emitIns_R_I(INS_tst, size, reg, 0);
}
}
regSet.verifyRegUsed(reg);
}
/***********************************************************************************
*
* Generate code to set a register 'targetReg' of type 'targetType' to the constant
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// TODO-CQ: Currently we cannot do this for all handles because of
// https://github.com/dotnet/runtime/issues/60712
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal,
INS_FLAGS_DONT_CARE DEBUGARG(tree->AsIntCon()->gtTargetHandle)
DEBUGARG(tree->AsIntCon()->gtFlags));
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
emitter* emit = GetEmitter();
emitAttr size = emitActualTypeSize(tree);
double constValue = tree->AsDblCon()->gtDconVal;
// Make sure we use "movi reg, 0x00" only for positive zero (0.0) and not for negative zero (-0.0)
if (*(__int64*)&constValue == 0)
{
// A faster/smaller way to generate 0.0
// We will just zero out the entire vector register for both float and double
emit->emitIns_R_I(INS_movi, EA_16BYTE, targetReg, 0x00, INS_OPTS_16B);
}
else if (emitter::emitIns_valid_imm_for_fmov(constValue))
{
// We can load the FP constant using the fmov FP-immediate for this constValue
emit->emitIns_R_F(INS_fmov, size, targetReg, constValue);
}
else
{
// Get a temp integer register to compute long address.
regNumber addrReg = tree->GetSingleTempReg();
// We must load the FP constant from the constant pool
// Emit a data section constant for the float or double constant.
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(constValue, size);
// For long address (default): `adrp + ldr + fmov` will be emitted.
// For short address (proven later), `ldr` will be emitted.
emit->emitIns_R_C(INS_ldr, size, targetReg, addrReg, hnd, 0);
}
}
break;
default:
unreached();
}
}
// Produce code for a GT_INC_SATURATE node.
void CodeGen::genCodeForIncSaturate(GenTree* tree)
{
regNumber targetReg = tree->GetRegNum();
// The arithmetic node must be sitting in a register (since it's not contained)
assert(!tree->isContained());
// The dst can only be a register.
assert(targetReg != REG_NA);
GenTree* operand = tree->gtGetOp1();
assert(!operand->isContained());
// The src must be a register.
regNumber operandReg = genConsumeReg(operand);
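// Emits:
//   adds targetReg, operandReg, #1   ; increment; sets the carry flag if the value wraps to zero
//   cinv targetReg, targetReg, hs    ; on carry, invert the wrapped zero back to all-ones (saturate)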
GetEmitter()->emitIns_R_R_I(INS_adds, emitActualTypeSize(tree), targetReg, operandReg, 1);
GetEmitter()->emitIns_R_R_COND(INS_cinv, emitActualTypeSize(tree), targetReg, targetReg, INS_COND_HS);
genProduceReg(tree);
}
// Generate code to get the high N bits of an N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
assert(!treeNode->gtOverflowEx());
genConsumeOperands(treeNode);
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
emitAttr attr = emitActualTypeSize(treeNode);
unsigned isUnsigned = (treeNode->gtFlags & GTF_UNSIGNED);
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
assert(!varTypeIsFloating(targetType));
// The arithmetic node must be sitting in a register (since it's not contained)
assert(targetReg != REG_NA);
if (EA_SIZE(attr) == EA_8BYTE)
{
instruction ins = isUnsigned ? INS_umulh : INS_smulh;
regNumber r = emit->emitInsTernary(ins, attr, treeNode, op1, op2);
assert(r == targetReg);
}
else
{
assert(EA_SIZE(attr) == EA_4BYTE);
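// For 32-bit operands, smull/umull place the full 64-bit product in targetReg;
// shifting right by 32 then leaves the high half of the result.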
instruction ins = isUnsigned ? INS_umull : INS_smull;
regNumber r = emit->emitInsTernary(ins, EA_4BYTE, treeNode, op1, op2);
emit->emitIns_R_R_I(isUnsigned ? INS_lsr : INS_asr, EA_8BYTE, targetReg, targetReg, 32);
}
genProduceReg(treeNode);
}
// Generate code for ADD, SUB, MUL, DIV, UDIV, AND, AND_NOT, OR and XOR
// The caller is expected to have called genConsumeOperands() before calling this method.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
assert(treeNode->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_DIV, GT_UDIV, GT_AND, GT_AND_NOT, GT_OR, GT_XOR));
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
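// If the node is marked GTF_SET_FLAGS, use the flag-setting form of the instruction.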
if ((treeNode->gtFlags & GTF_SET_FLAGS) != 0)
{
switch (oper)
{
case GT_ADD:
ins = INS_adds;
break;
case GT_SUB:
ins = INS_subs;
break;
case GT_AND:
ins = INS_ands;
break;
case GT_AND_NOT:
ins = INS_bics;
break;
default:
noway_assert(!"Unexpected BinaryOp with GTF_SET_FLAGS set");
}
}
// The arithmetic node must be sitting in a register (since it's not contained)
assert(targetReg != REG_NA);
regNumber r = emit->emitInsTernary(ins, emitActualTypeSize(treeNode), treeNode, op1, op2);
assert(r == targetReg);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
var_types targetType = varDsc->GetRegisterType(tree);
bool isRegCandidate = varDsc->lvIsRegCandidate();
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
// targetType must be a normal scalar type and not a TYP_STRUCT
assert(targetType != TYP_STRUCT);
instruction ins = ins_Load(targetType);
emitAttr attr = emitActualTypeSize(targetType);
emitter* emit = GetEmitter();
emit->emitIns_R_S(ins, attr, tree->GetRegNum(), varNum, 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
noway_assert(targetType != TYP_STRUCT);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreLclTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
// record the offset
unsigned offset = tree->GetLclOffs();
// We must have a stack store with GT_STORE_LCL_FLD
noway_assert(targetReg == REG_NA);
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
// Ensure that lclVar nodes are typed correctly.
assert(!varDsc->lvNormalizeOnStore() || targetType == genActualType(varDsc->TypeGet()));
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
regNumber dataReg = REG_NA;
if (data->isContainedIntOrIImmed())
{
assert(data->IsIntegralConst(0));
dataReg = REG_ZR;
}
else if (data->isContained())
{
assert(data->OperIs(GT_BITCAST));
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
else
{
assert(!data->isContained());
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emitAttr attr = emitActualTypeSize(targetType);
emit->emitIns_S_R(ins, attr, dataReg, varNum, offset);
genUpdateLife(tree);
varDsc->SetRegNum(REG_STK);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// lclNode - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
{
GenTree* data = lclNode->gtOp1;
// Stores from a multi-reg source are handled separately.
if (data->gtSkipReloadOrCopy()->IsMultiRegNode())
{
genMultiRegStoreToLocal(lclNode);
return;
}
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
if (lclNode->IsMultiReg())
{
// This is the case of storing to a multi-reg HFA local from a fixed-size SIMD type.
assert(varTypeIsSIMD(data) && varDsc->lvIsHfa() && (varDsc->GetHfaType() == TYP_FLOAT));
regNumber operandReg = genConsumeReg(data);
unsigned int regCount = varDsc->lvFieldCnt;
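// Copy each 4-byte lane of the SIMD source into the register assigned to the
// corresponding promoted field: dup s<varReg>, v<operandReg>.s[i]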
for (unsigned i = 0; i < regCount; ++i)
{
regNumber varReg = lclNode->GetRegByIndex(i);
assert(varReg != REG_NA);
unsigned fieldLclNum = varDsc->lvFieldLclStart + i;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldLclNum);
assert(fieldVarDsc->TypeGet() == TYP_FLOAT);
GetEmitter()->emitIns_R_R_I(INS_dup, emitTypeSize(TYP_FLOAT), varReg, operandReg, i);
}
genProduceReg(lclNode);
}
else
{
regNumber targetReg = lclNode->GetRegNum();
emitter* emit = GetEmitter();
unsigned varNum = lclNode->GetLclNum();
var_types targetType = varDsc->GetRegisterType(lclNode);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(lclNode);
return;
}
#endif // FEATURE_SIMD
genConsumeRegs(data);
regNumber dataReg = REG_NA;
if (data->isContained())
{
// This is only possible for a zero-init or bitcast.
const bool zeroInit = (data->IsIntegralConst(0) || data->IsSIMDZero());
assert(zeroInit || data->OperIs(GT_BITCAST));
if (zeroInit && varTypeIsSIMD(targetType))
{
if (targetReg != REG_NA)
{
emit->emitIns_R_I(INS_movi, emitActualTypeSize(targetType), targetReg, 0x00, INS_OPTS_16B);
genProduceReg(lclNode);
}
else
{
if (targetType == TYP_SIMD16)
{
GetEmitter()->emitIns_S_S_R_R(INS_stp, EA_8BYTE, EA_8BYTE, REG_ZR, REG_ZR, varNum, 0);
}
else
{
assert(targetType == TYP_SIMD8);
GetEmitter()->emitIns_S_R(INS_str, EA_8BYTE, REG_ZR, varNum, 0);
}
genUpdateLife(lclNode);
}
return;
}
if (zeroInit)
{
dataReg = REG_ZR;
}
else
{
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
}
else
{
assert(!data->isContained());
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
if (targetReg == REG_NA) // store into stack based LclVar
{
inst_set_SV_var(lclNode);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emitAttr attr = emitActualTypeSize(targetType);
emit->emitIns_S_R(ins, attr, dataReg, varNum, /* offset */ 0);
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
else // store into register (i.e move into register)
{
// Assign into targetReg when dataReg (from op1) is not the same register
inst_Mov(targetType, targetReg, dataReg, /* canSkip */ true);
genProduceReg(lclNode);
}
}
}
//------------------------------------------------------------------------
// genSimpleReturn: Generates code for simple return statement for arm64.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with non-struct and non-void type
//
// Return Value:
// None
//
void CodeGen::genSimpleReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
GenTree* op1 = treeNode->gtGetOp1();
var_types targetType = treeNode->TypeGet();
assert(targetType != TYP_STRUCT);
assert(targetType != TYP_VOID);
regNumber retReg = varTypeUsesFloatArgReg(treeNode) ? REG_FLOATRET : REG_INTRET;
bool movRequired = (op1->GetRegNum() != retReg);
if (!movRequired)
{
if (op1->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
bool isRegCandidate = varDsc->lvIsRegCandidate();
if (isRegCandidate && ((op1->gtFlags & GTF_SPILLED) == 0))
{
// We may need to generate a zero-extending mov instruction to load the value from this GT_LCL_VAR
var_types op1Type = genActualType(op1->TypeGet());
var_types lclType = genActualType(varDsc->TypeGet());
if (genTypeSize(op1Type) < genTypeSize(lclType))
{
movRequired = true;
}
}
}
}
emitAttr attr = emitActualTypeSize(targetType);
GetEmitter()->emitIns_Mov(INS_mov, attr, retReg, op1->GetRegNum(), /* canSkip */ !movRequired);
}
/***********************************************************************************************
* Generate code for localloc
*/
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
regNumber targetReg = tree->GetRegNum();
regNumber regCnt = REG_NA;
regNumber pspSymReg = REG_NA;
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
BasicBlock* loop = nullptr;
unsigned stackAdjustment = 0;
const target_ssize_t ILLEGAL_LAST_TOUCH_DELTA = (target_ssize_t)-1;
target_ssize_t lastTouchDelta =
ILLEGAL_LAST_TOUCH_DELTA; // The number of bytes from SP to the last stack address probed.
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
// Compute the amount of memory to allocate, properly STACK_ALIGN'ed.
size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
goto BAILOUT;
}
// 'amount' is the total number of bytes to localloc, rounded up so it is properly STACK_ALIGN'ed
amount = AlignUp(amount, STACK_ALIGN);
}
else
{
// If the size is 0, bail out by returning null in targetReg
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_tst, easz, targetReg, targetReg);
inst_JMP(EJ_eq, endLabel);
// Compute the size of the block to allocate and perform alignment.
// If compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
inst_Mov(size->TypeGet(), regCnt, targetReg, /* canSkip */ true);
}
// Align to STACK_ALIGN
// regCnt will be the total number of bytes to localloc
inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
inst_RV_IV(INS_and, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
// Localloc returns stack space that aligned to STACK_ALIGN bytes. The following
// are the cases that need to be handled:
// i) Method has out-going arg area.
// It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
// Therefore, we will pop off the out-going arg area from the stack pointer before allocating the localloc
// space.
// ii) Method has no out-going arg area.
// Nothing to pop off from the stack.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize,
rsGetRsvdReg());
stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
}
if (size->IsCnsIntOrI())
{
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
const int storePairRegsWritesBytes = 2 * REGSIZE_BYTES;
// For small allocations we will generate up to four stp instructions, to zero 16 to 64 bytes.
static_assert_no_msg(STACK_ALIGN == storePairRegsWritesBytes);
assert(amount % storePairRegsWritesBytes == 0); // stp stores two registers at a time
if (compiler->info.compInitMem)
{
if (amount <= LCLHEAP_UNROLL_LIMIT)
{
// The following zeroes the last 16 bytes and probes the page containing [sp, #16] address.
// stp xzr, xzr, [sp, #-16]!
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SPBASE, -storePairRegsWritesBytes,
INS_OPTS_PRE_INDEX);
if (amount > storePairRegsWritesBytes)
{
// The following sets SP to its final value and zeroes the first 16 bytes of the allocated space.
// stp xzr, xzr, [sp, #-amount+16]!
const ssize_t finalSpDelta = (ssize_t)amount - storePairRegsWritesBytes;
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SPBASE, -finalSpDelta,
INS_OPTS_PRE_INDEX);
// The following zeroes the remaining space in [finalSp+16, initialSp-16) interval
// using a sequence of stp instruction with unsigned offset.
for (ssize_t offset = storePairRegsWritesBytes; offset < finalSpDelta;
offset += storePairRegsWritesBytes)
{
// stp xzr, xzr, [sp, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SPBASE, offset);
}
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
}
else if (amount < compiler->eeGetPageSize()) // must be < not <=
{
// Since the size is less than a page, simply adjust the SP value.
// The SP might already be in the guard page, so we must touch it BEFORE
// the alloc, not after.
// Note that we check against the lower boundary of the post-index immediate range [-256, 256)
// since the offset is -amount.
const bool canEncodeLoadRegPostIndexOffset = amount <= 256;
if (canEncodeLoadRegPostIndexOffset)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, -(ssize_t)amount,
INS_OPTS_POST_INDEX);
}
else if (emitter::canEncodeLoadOrStorePairOffset(-(ssize_t)amount, EA_8BYTE))
{
// The following probes the page and allocates the local heap.
// ldp tmpReg, xzr, [sp], #-amount
// Note that we cannot use ldp xzr, xzr since
// the behaviour of ldp where two source registers are the same is unpredictable.
const regNumber tmpReg = targetReg;
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, REG_ZR, REG_SPBASE, -(ssize_t)amount,
INS_OPTS_POST_INDEX);
}
else
{
// ldr wzr, [sp]
// sub, sp, #amount
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, 0);
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, amount, rsGetRsvdReg());
}
lastTouchDelta = amount;
goto ALLOC_DONE;
}
// else, "mov regCnt, amount"
// If compInitMem=true, we can reuse targetReg as regcnt.
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
}
instGen_Set_Reg_To_Imm(((unsigned int)amount == amount) ? EA_4BYTE : EA_8BYTE, regCnt, amount);
}
if (compiler->info.compInitMem)
{
BasicBlock* loop = genCreateTempLabel();
// At this point 'regCnt' is set to the total number of bytes to localloc.
// Since we have to zero out the allocated memory AND ensure that the stack pointer is always valid
// by tickling the pages, we will just push 0's on the stack.
//
// Note: regCnt is guaranteed to be even on Arm64 since STACK_ALIGN/TARGET_POINTER_SIZE = 2
// and localloc size is a multiple of STACK_ALIGN.
// Loop:
genDefineTempLabel(loop);
// We can use pre-indexed addressing.
// stp ZR, ZR, [SP, #-16]!
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, REG_SPBASE, -16, INS_OPTS_PRE_INDEX);
// If not done, loop
// Note that regCnt is the number of bytes to stack allocate.
// Therefore we need to subtract 16 from regcnt here.
assert(genIsValidIntReg(regCnt));
inst_RV_IV(INS_subs, regCnt, 16, emitActualTypeSize(type));
inst_JMP(EJ_ne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
//
// We don't need to zero out the allocated memory. However, we do have
// to tickle the pages to ensure that SP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case SP is on the last byte of the guard page. Thus you must
// touch SP-0 first not SP-0x1000.
//
// This is similar to the prolog code in CodeGen::genAllocLclFrame().
//
// Note that we go through a few hoops so that SP never points to
// illegal pages at any time during the tickling process.
//
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
// bvc Loop // result is smaller than original SP (no wrap around)
// mov regCnt, #0 // Overflow, pick lowest possible value
//
// Loop:
// ldr wzr, [SP + 0] // tickle the page - read from the page
// sub regTmp, SP, PAGE_SIZE // decrement SP by eeGetPageSize()
// cmp regTmp, regCnt
// jb Done
// mov SP, regTmp
// j Loop
//
// Done:
// mov SP, regCnt
//
// Setup the regTmp
regNumber regTmp = tree->GetSingleTempReg();
BasicBlock* loop = genCreateTempLabel();
BasicBlock* done = genCreateTempLabel();
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
GetEmitter()->emitIns_R_R_R(INS_subs, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt);
inst_JMP(EJ_vc, loop); // branch if the V flag is not set
// Overflow, set regCnt to lowest possible value
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
genDefineTempLabel(loop);
// tickle the page - Read from the updated SP - this triggers a page fault when on the guard page
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, 0);
// decrement SP by eeGetPageSize()
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
inst_JMP(EJ_lo, done);
// Update SP to be at the next page of stack that we will tickle
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp, /* canSkip */ false);
// Jump to loop and tickle new stack address
inst_JMP(EJ_jmp, loop);
// Done with stack tickle loop
genDefineTempLabel(done);
// Now just move the final value to SP
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt, /* canSkip */ false);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate outgoing arg area. We must probe this adjustment.
if (stackAdjustment != 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) || (lastTouchDelta >= 0));
const regNumber tmpReg = rsGetRsvdReg();
if ((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) ||
(stackAdjustment + (unsigned)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, tmpReg);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, tmpReg);
}
// Return the stackalloc'ed address in result register.
// TargetReg = SP + stackAdjustment.
//
genInstrWithConstant(INS_add, EA_PTRSIZE, targetReg, REG_SPBASE, (ssize_t)stackAdjustment, tmpReg);
}
else // stackAdjustment == 0
{
// Move the final value of SP to targetReg
inst_Mov(TYP_I_IMPL, targetReg, REG_SPBASE, /* canSkip */ false);
}
BAILOUT:
if (endLabel != nullptr)
genDefineTempLabel(endLabel);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
var_types targetType = tree->TypeGet();
assert(!tree->OperIs(GT_NOT) || !varTypeIsFloating(targetType));
regNumber targetReg = tree->GetRegNum();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
// The arithmetic node must be sitting in a register (since it's not contained)
assert(!tree->isContained());
// The dst can only be a register.
assert(targetReg != REG_NA);
GenTree* operand = tree->gtGetOp1();
assert(!operand->isContained());
// The src must be a register.
regNumber operandReg = genConsumeReg(operand);
GetEmitter()->emitIns_R_R(ins, emitActualTypeSize(tree), targetReg, operandReg);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBswap: Produce code for a GT_BSWAP / GT_BSWAP16 node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForBswap(GenTree* tree)
{
assert(tree->OperIs(GT_BSWAP, GT_BSWAP16));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
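// rev16 swaps the bytes within each 16-bit halfword; rev reverses all bytes of the register.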
if (tree->OperIs(GT_BSWAP16))
{
inst_RV_RV(INS_rev16, targetReg, operandReg, targetType);
}
else
{
inst_RV_RV(INS_rev, targetReg, operandReg, targetType);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForDivMod: Produce code for a GT_DIV/GT_UDIV node. We don't see MOD:
// (1) integer MOD is morphed into a sequence of sub, mul, div in fgMorph;
// (2) float/double MOD is morphed into a helper call by front-end.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForDivMod(GenTreeOp* tree)
{
assert(tree->OperIs(GT_DIV, GT_UDIV));
var_types targetType = tree->TypeGet();
emitter* emit = GetEmitter();
genConsumeOperands(tree);
if (varTypeIsFloating(targetType))
{
// Floating point divide never raises an exception
genCodeForBinary(tree);
}
else // an integer divide operation
{
GenTree* divisorOp = tree->gtGetOp2();
emitAttr size = EA_ATTR(genTypeSize(genActualType(tree->TypeGet())));
if (divisorOp->IsIntegralConst(0))
{
// We unconditionally throw a divide by zero exception
genJumpToThrowHlpBlk(EJ_jmp, SCK_DIV_BY_ZERO);
// We still need to call genProduceReg
genProduceReg(tree);
}
else // the divisor is not the constant zero
{
regNumber divisorReg = divisorOp->GetRegNum();
// Generate the required runtime checks for GT_DIV or GT_UDIV
if (tree->gtOper == GT_DIV)
{
BasicBlock* sdivLabel = genCreateTempLabel();
// Two possible exceptions:
// (AnyVal / 0) => DivideByZeroException
// (MinInt / -1) => ArithmeticException
//
bool checkDividend = true;
// Do we have an immediate for the 'divisorOp'?
//
if (divisorOp->IsCnsIntOrI())
{
GenTreeIntConCommon* intConstTree = divisorOp->AsIntConCommon();
ssize_t intConstValue = intConstTree->IconValue();
assert(intConstValue != 0); // already checked above by IsIntegralConst(0)
if (intConstValue != -1)
{
checkDividend = false; // We statically know that the divisor is not -1, so the dividend need not be checked
}
}
else // insert check for division by zero
{
// If the divisor is zero, throw a DivideByZeroException
emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
genJumpToThrowHlpBlk(EJ_eq, SCK_DIV_BY_ZERO);
}
if (checkDividend)
{
// If the divisor is not -1, branch to 'sdivLabel'
emit->emitIns_R_I(INS_cmp, size, divisorReg, -1);
inst_JMP(EJ_ne, sdivLabel);
// If control flow continues past here the 'divisorReg' is known to be -1
regNumber dividendReg = tree->gtGetOp1()->GetRegNum();
// At this point the divisor is known to be -1
//
// Issue the 'adds zr, dividendReg, dividendReg' instruction
// this will set both the Z and V flags only when dividendReg is MinInt
//
emit->emitIns_R_R_R(INS_adds, size, REG_ZR, dividendReg, dividendReg);
inst_JMP(EJ_ne, sdivLabel); // goto sdiv if the Z flag is clear
genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flag is set, throw
// ArithmeticException
genDefineTempLabel(sdivLabel);
}
genCodeForBinary(tree); // Generate the sdiv instruction
}
else // (tree->gtOper == GT_UDIV)
{
// Only one possible exception
// (AnyVal / 0) => DivideByZeroException
//
// Note that division by the constant 0 was already checked for above by the
// op2->IsIntegralConst(0) check
//
if (!divisorOp->IsCnsIntOrI())
{
// divisorOp is not a constant, so it could be zero
//
emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
genJumpToThrowHlpBlk(EJ_eq, SCK_DIV_BY_ZERO);
}
genCodeForBinary(tree);
}
}
}
}
// Generate code for CpObj nodes which copy structs that have interleaved
// GC pointers.
// For this case we'll generate a sequence of loads/stores in the case of struct
// slots that don't contain GC pointers. The generated code will look like:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
//
// In the case of a GC-Pointer we'll call the ByRef write barrier helper
// which happens to use the same registers as the previous call to maintain
// the same register requirements and register killsets:
// bl CORINFO_HELP_ASSIGN_BYREF
//
// So finally an example would look like this:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
var_types srcAddrType = TYP_BYREF;
bool sourceIsLocal = false;
assert(source->isContained());
if (source->gtOper == GT_IND)
{
GenTree* srcAddr = source->gtGetOp1();
assert(!srcAddr->isContained());
srcAddrType = srcAddr->TypeGet();
}
else
{
noway_assert(source->IsLocal());
sourceIsLocal = true;
}
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
assert(!dstAddr->isContained());
// This GenTree node has data about GC pointers, this means we're dealing
// with CpObj.
assert(cpObjNode->GetLayout()->HasGCPtr());
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_WRITE_BARRIER_DST_BYREF, REG_WRITE_BARRIER_SRC_BYREF, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_SRC_BYREF, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_DST_BYREF, dstAddr->TypeGet());
ClassLayout* layout = cpObjNode->GetLayout();
unsigned slots = layout->GetSlotCount();
// Temp register(s) used to perform the sequence of loads and stores.
regNumber tmpReg = cpObjNode->ExtractTempReg();
regNumber tmpReg2 = REG_NA;
assert(genIsValidIntReg(tmpReg));
assert(tmpReg != REG_WRITE_BARRIER_SRC_BYREF);
assert(tmpReg != REG_WRITE_BARRIER_DST_BYREF);
if (slots > 1)
{
tmpReg2 = cpObjNode->GetSingleTempReg();
assert(tmpReg2 != tmpReg);
assert(genIsValidIntReg(tmpReg2));
assert(tmpReg2 != REG_WRITE_BARRIER_DST_BYREF);
assert(tmpReg2 != REG_WRITE_BARRIER_SRC_BYREF);
}
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile CpObj operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
unsigned i = 0;
// While two or more slots remain, use an ldp/stp sequence
while (i < slots - 1)
{
emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0));
emitAttr attr1 = emitTypeSize(layout->GetGCPtrType(i + 1));
emit->emitIns_R_R_R_I(INS_ldp, attr0, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF, 2 * TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX, attr1);
emit->emitIns_R_R_R_I(INS_stp, attr0, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF, 2 * TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX, attr1);
i += 2;
}
// Use a single ldr/str for any last remaining slot
if (i < slots)
{
emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0));
emit->emitIns_R_R_I(INS_ldr, attr0, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
emit->emitIns_R_R_I(INS_str, attr0, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
}
}
else
{
unsigned gcPtrCount = cpObjNode->GetLayout()->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
// Check if the next slot's type is also TYP_GC_NONE and use ldp/stp
if ((i + 1 < slots) && !layout->IsGCPtr(i + 1))
{
emit->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF,
2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
emit->emitIns_R_R_R_I(INS_stp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF,
2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
++i; // extra increment of i, since we are copying two items
}
else
{
emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
}
}
else
{
// In the case of a GC-Pointer we'll call the ByRef write barrier helper
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
}
++i;
}
assert(gcPtrCount == 0);
}
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a load barrier after a volatile CpObj operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
// Clear the gcInfo for REG_WRITE_BARRIER_SRC_BYREF and REG_WRITE_BARRIER_DST_BYREF.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_WRITE_BARRIER_SRC_BYREF | RBM_WRITE_BARRIER_DST_BYREF);
}
// Generate code to perform a switch statement based on a table of ip-relative offsets
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
regNumber tmpReg = treeNode->GetSingleTempReg();
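// Overall, the emitted sequence is roughly:
//   ldr  w_base, [x_base, x_idx, lsl #2]  ; load the 4-byte ip-relative offset
//   adr  x_tmp, <fgFirstBB>
//   add  x_base, x_base, x_tmp
//   br   x_base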
// load the ip-relative offset (which is relative to start of fgFirstBB)
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, baseReg, baseReg, idxReg, INS_OPTS_LSL);
// add it to the absolute address of fgFirstBB
GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, baseReg, baseReg, tmpReg);
// br baseReg
GetEmitter()->emitIns_R(INS_br, emitActualTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabOffs;
unsigned jmpTabBase;
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
}
GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
GetEmitter()->emitIns_R_C(INS_adr, emitActualTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), REG_NA,
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD, GT_XAND, GT_XORR or GT_XCHG node.
//
// Arguments:
// treeNode - the GT_XADD/XAND/XORR/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
{
GenTree* data = treeNode->AsOp()->gtOp2;
GenTree* addr = treeNode->AsOp()->gtOp1;
regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();
genConsumeAddress(addr);
genConsumeRegs(data);
emitAttr dataSize = emitActualTypeSize(data);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
assert(!data->isContainedIntOrIImmed());
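// With ARMv8.1 LSE atomics, each operation maps onto a single atomic instruction:
//   GT_XORR -> ldsetal, GT_XAND -> mvn + ldclral, GT_XCHG -> swpal, GT_XADD -> ldaddal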
switch (treeNode->gtOper)
{
case GT_XORR:
GetEmitter()->emitIns_R_R_R(INS_ldsetal, dataSize, dataReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
case GT_XAND:
{
// Grab a temp reg to perform `MVN` for dataReg first.
regNumber tempReg = treeNode->GetSingleTempReg();
GetEmitter()->emitIns_R_R(INS_mvn, dataSize, tempReg, dataReg);
GetEmitter()->emitIns_R_R_R(INS_ldclral, dataSize, tempReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
}
case GT_XCHG:
GetEmitter()->emitIns_R_R_R(INS_swpal, dataSize, dataReg, targetReg, addrReg);
break;
case GT_XADD:
GetEmitter()->emitIns_R_R_R(INS_ldaddal, dataSize, dataReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
default:
assert(!"Unexpected treeNode->gtOper");
}
}
else
{
// These are imported normally if Atomics aren't supported.
assert(!treeNode->OperIs(GT_XORR, GT_XAND));
regNumber exResultReg = treeNode->ExtractTempReg(RBM_ALLINT);
regNumber storeDataReg = (treeNode->OperGet() == GT_XCHG) ? dataReg : treeNode->ExtractTempReg(RBM_ALLINT);
regNumber loadReg = (targetReg != REG_NA) ? targetReg : storeDataReg;
// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);
noway_assert(addrReg != loadReg);
noway_assert(dataReg != loadReg);
noway_assert(addrReg != storeDataReg);
noway_assert((treeNode->OperGet() == GT_XCHG) || (addrReg != dataReg));
assert(addr->isUsedFromReg());
noway_assert(exResultReg != REG_NA);
noway_assert(exResultReg != targetReg);
noway_assert((targetReg != REG_NA) || (treeNode->OperGet() != GT_XCHG));
// Store exclusive unpredictable cases must be avoided
noway_assert(exResultReg != storeDataReg);
noway_assert(exResultReg != addrReg);
// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
// registers
// die at the first instruction generated by the node. This is not the case for these atomics as the input
// registers are multiply-used. As such, we need to mark the addr register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());
// Emit code like this:
// retry:
// ldxr loadReg, [addrReg]
// add storeDataReg, loadReg, dataReg # Only for GT_XADD
// # GT_XCHG storeDataReg === dataReg
// stxr exResult, storeDataReg, [addrReg]
// cbnz exResult, retry
// dmb ish
BasicBlock* labelRetry = genCreateTempLabel();
genDefineTempLabel(labelRetry);
// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(INS_ldaxr, dataSize, loadReg, addrReg);
switch (treeNode->OperGet())
{
case GT_XADD:
if (data->isContainedIntOrIImmed())
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
genInstrWithConstant(INS_add, dataSize, storeDataReg, loadReg, data->AsIntConCommon()->IconValue(),
REG_NA);
}
else
{
GetEmitter()->emitIns_R_R_R(INS_add, dataSize, storeDataReg, loadReg, dataReg);
}
break;
case GT_XCHG:
assert(!data->isContained());
storeDataReg = dataReg;
break;
default:
unreached();
}
// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(INS_stlxr, dataSize, exResultReg, storeDataReg, addrReg);
GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
instGen_MemoryBarrier();
gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());
}
if (treeNode->GetRegNum() != REG_NA)
{
genProduceReg(treeNode);
}
}
//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* treeNode)
{
assert(treeNode->OperIs(GT_CMPXCHG));
GenTree* addr = treeNode->gtOpLocation; // arg1
GenTree* data = treeNode->gtOpValue; // arg2
GenTree* comparand = treeNode->gtOpComparand; // arg3
regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();
regNumber comparandReg = comparand->GetRegNum();
genConsumeAddress(addr);
genConsumeRegs(data);
genConsumeRegs(comparand);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
emitAttr dataSize = emitActualTypeSize(data);
// casal uses the comparand as the target reg
GetEmitter()->emitIns_Mov(INS_mov, dataSize, targetReg, comparandReg, /* canSkip */ true);
// Catch case we destroyed data or address before use
noway_assert((addrReg != targetReg) || (targetReg == comparandReg));
noway_assert((dataReg != targetReg) || (targetReg == comparandReg));
GetEmitter()->emitIns_R_R_R(INS_casal, dataSize, targetReg, dataReg, addrReg);
}
else
{
regNumber exResultReg = treeNode->ExtractTempReg(RBM_ALLINT);
// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);
noway_assert(dataReg != targetReg);
noway_assert(comparandReg != targetReg);
noway_assert(addrReg != dataReg);
noway_assert(targetReg != REG_NA);
noway_assert(exResultReg != REG_NA);
noway_assert(exResultReg != targetReg);
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert(!comparand->isUsedFromMemory());
// Store exclusive unpredictable cases must be avoided
noway_assert(exResultReg != dataReg);
noway_assert(exResultReg != addrReg);
// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
// registers
// die at the first instruction generated by the node. This is not the case for these atomics as the input
// registers are multiply-used. As such, we need to mark the addr register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());
// TODO-ARM64-CQ Use ARMv8.1 atomics if available
// https://github.com/dotnet/runtime/issues/8225
// Emit code like this:
// retry:
// ldxr targetReg, [addrReg]
// cmp targetReg, comparandReg
// bne compareFail
// stxr exResult, dataReg, [addrReg]
// cbnz exResult, retry
// compareFail:
// dmb ish
BasicBlock* labelRetry = genCreateTempLabel();
BasicBlock* labelCompareFail = genCreateTempLabel();
genDefineTempLabel(labelRetry);
// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(INS_ldaxr, emitTypeSize(treeNode), targetReg, addrReg);
if (comparand->isContainedIntOrIImmed())
{
if (comparand->IsIntegralConst(0))
{
GetEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(treeNode), labelCompareFail, targetReg);
}
else
{
GetEmitter()->emitIns_R_I(INS_cmp, emitActualTypeSize(treeNode), targetReg,
comparand->AsIntConCommon()->IconValue());
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
}
else
{
GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(treeNode), targetReg, comparandReg);
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(INS_stlxr, emitTypeSize(treeNode), exResultReg, dataReg, addrReg);
GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
genDefineTempLabel(labelCompareFail);
instGen_MemoryBarrier();
gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());
}
genProduceReg(treeNode);
}
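//------------------------------------------------------------------------
// genGetInsForOper: Map a GenTree operator and operand type to the corresponding Arm64 instruction.
//
// Arguments:
//    oper - the GenTree operator
//    type - the operand type; a floating-point type selects the FP form of the instruction
//
// Return Value:
//    The instruction to use.
//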
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins = INS_BREAKPOINT;
if (varTypeIsFloating(type))
{
switch (oper)
{
case GT_ADD:
ins = INS_fadd;
break;
case GT_SUB:
ins = INS_fsub;
break;
case GT_MUL:
ins = INS_fmul;
break;
case GT_DIV:
ins = INS_fdiv;
break;
case GT_NEG:
ins = INS_fneg;
break;
default:
NYI("Unhandled oper in genGetInsForOper() - float");
unreached();
break;
}
}
else
{
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_and;
break;
case GT_AND_NOT:
ins = INS_bic;
break;
case GT_DIV:
ins = INS_sdiv;
break;
case GT_UDIV:
ins = INS_udiv;
break;
case GT_MUL:
ins = INS_mul;
break;
case GT_LSH:
ins = INS_lsl;
break;
case GT_NEG:
ins = INS_neg;
break;
case GT_NOT:
ins = INS_mvn;
break;
case GT_OR:
ins = INS_orr;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_RSH:
ins = INS_asr;
break;
case GT_RSZ:
ins = INS_lsr;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_eor;
break;
default:
NYI("Unhandled oper in genGetInsForOper() - integer");
unreached();
break;
}
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, data->GetRegNum(), 0);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_eq, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
genDefineTempLabel(skipLabel);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
#ifdef FEATURE_SIMD
// Storing Vector3 of size 12 bytes through indirection
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering
// registers are taken care of.
genConsumeOperands(tree);
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_WRITE_BARRIER_DST_BYREF,
// as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_WRITE_BARRIER_DST_BYREF);
// 'addr' goes into x14 (REG_WRITE_BARRIER_DST)
genCopyRegIfNeeded(addr, REG_WRITE_BARRIER_DST);
// 'data' goes into x15 (REG_WRITE_BARRIER_SRC)
genCopyRegIfNeeded(data, REG_WRITE_BARRIER_SRC);
genGCWriteBarrier(tree, writeBarrierForm);
}
else // A normal store, not a WriteBarrier store
{
// We must consume the operands in the proper execution order,
// so that liveness is updated appropriately.
genConsumeAddress(addr);
if (!data->isContained())
{
genConsumeRegs(data);
}
regNumber dataReg;
if (data->isContainedIntOrIImmed())
{
assert(data->IsIntegralConst(0));
dataReg = REG_ZR;
}
else // data is not contained, so evaluate it into a register
{
assert(!data->isContained());
dataReg = data->GetRegNum();
}
var_types type = tree->TypeGet();
instruction ins = ins_StoreFromSrc(dataReg, type);
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
bool addrIsInReg = addr->isUsedFromReg();
bool addrIsAligned = ((tree->gtFlags & GTF_IND_UNALIGNED) == 0);
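// Prefer a store-release (stlrb/stlrh/stlr) when the address is in a register, the access is
// suitably aligned, and (for stlr) the data is in a general-purpose register; otherwise fall
// back to a full memory barrier followed by a plain store.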
if ((ins == INS_strb) && addrIsInReg)
{
ins = INS_stlrb;
}
else if ((ins == INS_strh) && addrIsInReg && addrIsAligned)
{
ins = INS_stlrh;
}
else if ((ins == INS_str) && genIsValidIntReg(dataReg) && addrIsInReg && addrIsAligned)
{
ins = INS_stlr;
}
else
{
// issue a full memory barrier before a volatile StInd
// Note: We cannot issue a store barrier (ishst) because it is a weaker barrier.
// Loads could get reordered around it, causing the wrong value to be read.
instGen_MemoryBarrier();
}
}
GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), dataReg, tree);
// If store was to a variable, update variable liveness after instruction was emitted.
genUpdateLife(tree);
}
}
//------------------------------------------------------------------------
// genCodeForSwap: Produce code for a GT_SWAP node.
//
// Arguments:
// tree - the GT_SWAP node
//
void CodeGen::genCodeForSwap(GenTreeOp* tree)
{
assert(tree->OperIs(GT_SWAP));
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lcl1);
var_types type1 = varDsc1->TypeGet();
GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lcl2);
var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeIsFloating(type1) || varTypeIsFloating(type2));
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeIsFloating(type1));
regNumber oldOp1Reg = lcl1->GetRegNum();
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
regNumber oldOp2Reg = lcl2->GetRegNum();
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
varDsc1->SetRegNum(oldOp2Reg);
varDsc2->SetRegNum(oldOp1Reg);
// Do the xchg
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
size = EA_GCREF;
}
NYI("register swap");
// inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
// Update the gcInfo.
// Manually remove these regs for the gc sets (mostly to avoid confusing duplicative dump output)
gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidIntReg(op1->GetRegNum())); // Must be a valid int reg.
var_types dstType = treeNode->CastToType();
var_types srcType = genActualType(op1->TypeGet());
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
// We should never see a srcType whose size is neither EA_4BYTE nor EA_8BYTE
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert((srcSize == EA_4BYTE) || (srcSize == EA_8BYTE));
instruction ins = varTypeIsUnsigned(srcType) ? INS_ucvtf : INS_scvtf;
insOpts cvtOption = INS_OPTS_NONE; // invalid value
if (dstType == TYP_DOUBLE)
{
if (srcSize == EA_4BYTE)
{
cvtOption = INS_OPTS_4BYTE_TO_D;
}
else
{
assert(srcSize == EA_8BYTE);
cvtOption = INS_OPTS_8BYTE_TO_D;
}
}
else
{
assert(dstType == TYP_FLOAT);
if (srcSize == EA_4BYTE)
{
cvtOption = INS_OPTS_4BYTE_TO_S;
}
else
{
assert(srcSize == EA_8BYTE);
cvtOption = INS_OPTS_8BYTE_TO_S;
}
}
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(ins, emitActualTypeSize(dstType), treeNode->GetRegNum(), op1->GetRegNum(), cvtOption);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int/long
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
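// Illustrative examples (register numbers are arbitrary): a double -> int conversion
// is emitted as "fcvtzs w0, d0" (INS_OPTS_D_TO_4BYTE), and a float -> ulong
// conversion as "fcvtzu x0, s0" (INS_OPTS_S_TO_8BYTE).
//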
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg)); // Must be a valid int reg.
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
    // We should never see a dstType whose size is neither EA_4BYTE nor EA_8BYTE
// For conversions to small types (byte/sbyte/int16/uint16) from float/double,
// we expect the front-end or lowering phase to have generated two levels of cast.
//
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert((dstSize == EA_4BYTE) || (dstSize == EA_8BYTE));
instruction ins = INS_fcvtzs; // default to sign converts
insOpts cvtOption = INS_OPTS_NONE; // invalid value
if (varTypeIsUnsigned(dstType))
{
ins = INS_fcvtzu; // use unsigned converts
}
if (srcType == TYP_DOUBLE)
{
if (dstSize == EA_4BYTE)
{
cvtOption = INS_OPTS_D_TO_4BYTE;
}
else
{
assert(dstSize == EA_8BYTE);
cvtOption = INS_OPTS_D_TO_8BYTE;
}
}
else
{
assert(srcType == TYP_FLOAT);
if (dstSize == EA_4BYTE)
{
cvtOption = INS_OPTS_S_TO_4BYTE;
}
else
{
assert(dstSize == EA_8BYTE);
cvtOption = INS_OPTS_S_TO_8BYTE;
}
}
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(ins, dstSize, treeNode->GetRegNum(), op1->GetRegNum(), cvtOption);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
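// The emitted sequence for a double operand looks roughly like this (register
// numbers are illustrative):
//   fmov  x1, d0          ; copy the bits to the internal integer register
//   lsr   x1, x1, #52     ; shift the exponent into the low bits
//   and   w1, w1, #0x7FF  ; isolate the exponent
//   cmp   w1, #0x7FF      ; all 1's => NaN or infinity
//   b.eq  <throw ArithmeticException>
//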
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
GenTree* op1 = treeNode->AsOp()->gtOp1;
var_types targetType = treeNode->TypeGet();
int expMask = (targetType == TYP_FLOAT) ? 0x7F8 : 0x7FF; // Bit mask to extract exponent.
int shiftAmount = targetType == TYP_FLOAT ? 20 : 52;
emitter* emit = GetEmitter();
// Extract exponent into a register.
regNumber intReg = treeNode->GetSingleTempReg();
regNumber fpReg = genConsumeReg(op1);
inst_Mov(targetType, intReg, fpReg, /* canSkip */ false, emitActualTypeSize(treeNode));
emit->emitIns_R_R_I(INS_lsr, emitActualTypeSize(targetType), intReg, intReg, shiftAmount);
    // Isolate the exponent bits and check whether the exponent is all 1's
emit->emitIns_R_R_I(INS_and, EA_4BYTE, intReg, intReg, expMask);
emit->emitIns_R_I(INS_cmp, EA_4BYTE, intReg, expMask);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_eq, SCK_ARITH_EXCPN);
// if it is a finite value copy it to targetReg
inst_Mov(targetType, treeNode->GetRegNum(), fpReg, /* canSkip */ true);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE node.
//
// Arguments:
// tree - the node
//
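// Illustrative example (register numbers are arbitrary): a signed "x < 42" whose
// result is materialized into a register is emitted as
//   cmp   w0, #42
//   cset  w1, lt
//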
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = genActualType(op1->TypeGet());
var_types op2Type = genActualType(op2->TypeGet());
assert(!op1->isUsedFromMemory());
genConsumeOperands(tree);
emitAttr cmpSize = EA_ATTR(genTypeSize(op1Type));
assert(genTypeSize(op1Type) == genTypeSize(op2Type));
if (varTypeIsFloating(op1Type))
{
assert(varTypeIsFloating(op2Type));
assert(!op1->isContained());
assert(op1Type == op2Type);
if (op2->IsFPZero())
{
assert(op2->isContained());
emit->emitIns_R_F(INS_fcmp, cmpSize, op1->GetRegNum(), 0.0);
}
else
{
assert(!op2->isContained());
emit->emitIns_R_R(INS_fcmp, cmpSize, op1->GetRegNum(), op2->GetRegNum());
}
}
else
{
assert(!varTypeIsFloating(op2Type));
// We don't support swapping op1 and op2 to generate cmp reg, imm
assert(!op1->isContainedIntOrIImmed());
instruction ins = tree->OperIs(GT_TEST_EQ, GT_TEST_NE) ? INS_tst : INS_cmp;
if (op2->isContainedIntOrIImmed())
{
GenTreeIntConCommon* intConst = op2->AsIntConCommon();
emit->emitIns_R_I(ins, cmpSize, op1->GetRegNum(), intConst->IconValue());
}
else
{
emit->emitIns_R_R(ins, cmpSize, op1->GetRegNum(), op2->GetRegNum());
}
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromRelop(tree), tree->TypeGet(), targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForJumpCompare: Generates code for jmpCompare statement.
//
// A GT_JCMP node is created when a comparison and conditional branch
// can be executed in a single instruction.
//
// Arm64 has a few instructions with this behavior.
// - cbz/cbnz -- Compare and branch register zero/not zero
// - tbz/tbnz -- Test and branch register bit zero/not zero
//
// The cbz/cbnz supports the normal +/- 1MB branch range for conditional branches
// The tbz/tbnz supports a smaller +/- 32KB branch range
//
// A GT_JCMP cbz/cbnz node is created when there is a GT_EQ or GT_NE
// integer/unsigned comparison against #0 which is used by a GT_JTRUE
// condition jump node.
//
// A GT_JCMP tbz/tbnz node is created when there is a GT_TEST_EQ or GT_TEST_NE
// integer/unsigned comparison against a mask with a single bit set
// which is used by a GT_JTRUE condition jump node.
//
// This node is responsible for consuming the register, and emitting the
// appropriate fused compare/test and branch instruction
//
// Two flags guide code generation
// GTF_JCMP_TST -- Set if this is a tbz/tbnz rather than cbz/cbnz
// GTF_JCMP_EQ -- Set if this is cbz/tbz rather than cbnz/tbnz
//
// Arguments:
// tree - The GT_JCMP tree node.
//
// Return Value:
// None
//
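// Illustrative examples (register numbers are arbitrary):
//   "if (x == 0) goto L"          => cbz  x0, L
//   "if ((x & 0x10) != 0) goto L" => tbnz x0, #4, L
//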
void CodeGen::genCodeForJumpCompare(GenTreeOp* tree)
{
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
assert(tree->OperIs(GT_JCMP));
assert(!varTypeIsFloating(tree));
assert(!op1->isUsedFromMemory());
assert(!op2->isUsedFromMemory());
assert(op2->IsCnsIntOrI());
assert(op2->isContained());
genConsumeOperands(tree);
regNumber reg = op1->GetRegNum();
emitAttr attr = emitActualTypeSize(op1->TypeGet());
if (tree->gtFlags & GTF_JCMP_TST)
{
ssize_t compareImm = op2->AsIntCon()->IconValue();
assert(isPow2(compareImm));
instruction ins = (tree->gtFlags & GTF_JCMP_EQ) ? INS_tbz : INS_tbnz;
int imm = genLog2((size_t)compareImm);
GetEmitter()->emitIns_J_R_I(ins, attr, compiler->compCurBB->bbJumpDest, reg, imm);
}
else
{
assert(op2->IsIntegralConst(0));
instruction ins = (tree->gtFlags & GTF_JCMP_EQ) ? INS_cbz : INS_cbnz;
GetEmitter()->emitIns_J_R(ins, attr, compiler->compCurBB->bbJumpDest, reg);
}
}
//---------------------------------------------------------------------
// genSPtoFPdelta - return offset from the stack pointer (Initial-SP) to the frame pointer. The frame pointer
// will point to the saved frame pointer slot (i.e., there will be frame pointer chaining).
//
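// For example, in the common case where FP/LR are saved just above the outgoing
// argument area, a method with a 16-byte outgoing argument area has a delta of 16.
//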
int CodeGenInterface::genSPtoFPdelta() const
{
assert(isFramePointerUsed());
int delta = -1; // initialization to illegal value
if (IsSaveFpLrWithAllCalleeSavedRegisters())
{
// The saved frame pointer is at the top of the frame, just beneath the saved varargs register space and the
// saved LR.
delta = genTotalFrameSize() - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 /* FP, LR */ * REGSIZE_BYTES;
}
else
{
// We place the saved frame pointer immediately above the outgoing argument space.
delta = (int)compiler->lvaOutgoingArgSpaceSize;
}
assert(delta >= 0);
return delta;
}
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc.
//
// Return value:
// Total frame size
//
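// For example, a non-varargs method that pushes 4 callee-saved registers and has a
// 0x40-byte local frame reports a total frame size of 4 * 8 + 0x40 = 0x60 bytes.
//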
int CodeGenInterface::genTotalFrameSize() const
{
// For varargs functions, we home all the incoming register arguments. They are not
// included in the compCalleeRegsPushed count. This is like prespill on ARM32, but
// since we don't use "push" instructions to save them, we don't have to do the
// save of these varargs register arguments as the first thing in the prolog.
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) +
compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta;
callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
callerSPtoSPdelta -= genTotalFrameSize();
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
//---------------------------------------------------------------------
// SetSaveFpLrWithAllCalleeSavedRegisters - Set the variable that indicates if FP/LR registers
// are stored with the rest of the callee-saved registers.
//
void CodeGen::SetSaveFpLrWithAllCalleeSavedRegisters(bool value)
{
JITDUMP("Setting genSaveFpLrWithAllCalleeSavedRegisters to %s\n", dspBool(value));
genSaveFpLrWithAllCalleeSavedRegisters = value;
}
//---------------------------------------------------------------------
// IsSaveFpLrWithAllCalleeSavedRegisters - Return the value that indicates where FP/LR registers
// are stored in the prolog.
//
bool CodeGen::IsSaveFpLrWithAllCalleeSavedRegisters() const
{
return genSaveFpLrWithAllCalleeSavedRegisters;
}
/*****************************************************************************
* Emit a call to a helper function.
*
*/
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */)
{
void* addr = nullptr;
void* pAddr = nullptr;
emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
regNumber callTarget = REG_NA;
if (addr == nullptr)
{
// This is call to a runtime helper.
// adrp x, [reloc:rel page addr]
// add x, x, [reloc:page offset]
// ldr x, [x]
// br x
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
}
regMaskTP callTargetMask = genRegMask(callTargetReg);
regMaskTP callKillSet = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
// assert that all registers in callTargetMask are in the callKillSet
noway_assert((callTargetMask & callKillSet) == callTargetMask);
callTarget = callTargetReg;
// adrp + add with relocations will be emitted
GetEmitter()->emitIns_R_AI(INS_adrp, EA_PTR_DSP_RELOC, callTarget,
(ssize_t)pAddr DEBUGARG((size_t)compiler->eeFindHelper(helper))
DEBUGARG(GTF_ICON_METHOD_HDL));
GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, callTarget, callTarget);
callType = emitter::EC_INDIR_R;
}
GetEmitter()->emitIns_Call(callType, compiler->eeFindHelper(helper), INDEBUG_LDISASM_COMMA(nullptr) addr, argSize,
retSize, EA_UNKNOWN, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, DebugInfo(), callTarget, /* ireg */
REG_NA, 0, 0, /* xreg, xmul, disp */
false /* isJump */
);
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
regSet.verifyRegistersUsed(killMask);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDIntrinsic: Generate code for a SIMD Intrinsic. This is the main
// routine which in turn calls appropriate genSIMDIntrinsicXXX() routine.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
// Notes:
// Currently, we only recognize SIMDVector<float> and SIMDVector<int>, and
// a limited set of methods.
//
// TODO-CLEANUP Merge all versions of this function and move to new file simdcodegencommon.cpp.
void CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode)
{
// NYI for unsupported base types
if (!varTypeIsArithmetic(simdNode->GetSimdBaseType()))
{
noway_assert(!"SIMD intrinsic with unsupported base type.");
}
switch (simdNode->GetSIMDIntrinsicId())
{
case SIMDIntrinsicInit:
genSIMDIntrinsicInit(simdNode);
break;
case SIMDIntrinsicInitN:
genSIMDIntrinsicInitN(simdNode);
break;
case SIMDIntrinsicCast:
genSIMDIntrinsicUnOp(simdNode);
break;
case SIMDIntrinsicSub:
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
genSIMDIntrinsicBinOp(simdNode);
break;
case SIMDIntrinsicUpperSave:
genSIMDIntrinsicUpperSave(simdNode);
break;
case SIMDIntrinsicUpperRestore:
genSIMDIntrinsicUpperRestore(simdNode);
break;
default:
noway_assert(!"Unimplemented SIMD intrinsic.");
unreached();
}
}
insOpts CodeGen::genGetSimdInsOpt(emitAttr size, var_types elementType)
{
assert((size == EA_16BYTE) || (size == EA_8BYTE));
insOpts result = INS_OPTS_NONE;
switch (elementType)
{
case TYP_DOUBLE:
case TYP_ULONG:
case TYP_LONG:
result = (size == EA_16BYTE) ? INS_OPTS_2D : INS_OPTS_1D;
break;
case TYP_FLOAT:
case TYP_UINT:
case TYP_INT:
result = (size == EA_16BYTE) ? INS_OPTS_4S : INS_OPTS_2S;
break;
case TYP_USHORT:
case TYP_SHORT:
result = (size == EA_16BYTE) ? INS_OPTS_8H : INS_OPTS_4H;
break;
case TYP_UBYTE:
case TYP_BYTE:
result = (size == EA_16BYTE) ? INS_OPTS_16B : INS_OPTS_8B;
break;
default:
assert(!"Unsupported element type");
unreached();
}
return result;
}
// getOpForSIMDIntrinsic: return the opcode for the given SIMD Intrinsic
//
// Arguments:
// intrinsicId - SIMD intrinsic Id
// baseType - Base type of the SIMD vector
// ival        - Out param. Any immediate operand that needs to be passed to the instruction
//
//
// Return Value:
// Instruction (op) to be used, and immed is set if instruction requires an immediate operand.
//
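// For example, SIMDIntrinsicEqual maps to fcmeq for floating-point base types and to
// cmeq for integral base types; SIMDIntrinsicSub maps to fsub and sub respectively.
//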
instruction CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival /*=nullptr*/)
{
instruction result = INS_invalid;
if (varTypeIsFloating(baseType))
{
switch (intrinsicId)
{
case SIMDIntrinsicBitwiseAnd:
result = INS_and;
break;
case SIMDIntrinsicBitwiseOr:
result = INS_orr;
break;
case SIMDIntrinsicCast:
result = INS_mov;
break;
case SIMDIntrinsicEqual:
result = INS_fcmeq;
break;
case SIMDIntrinsicSub:
result = INS_fsub;
break;
default:
assert(!"Unsupported SIMD intrinsic");
unreached();
}
}
else
{
bool isUnsigned = varTypeIsUnsigned(baseType);
switch (intrinsicId)
{
case SIMDIntrinsicBitwiseAnd:
result = INS_and;
break;
case SIMDIntrinsicBitwiseOr:
result = INS_orr;
break;
case SIMDIntrinsicCast:
result = INS_mov;
break;
case SIMDIntrinsicEqual:
result = INS_cmeq;
break;
case SIMDIntrinsicSub:
result = INS_sub;
break;
default:
assert(!"Unsupported SIMD intrinsic");
unreached();
}
}
noway_assert(result != INS_invalid);
return result;
}
//------------------------------------------------------------------------
// genSIMDIntrinsicInit: Generate code for SIMD Intrinsic Initialize.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
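// Illustrative example (register numbers are arbitrary): broadcasting an int into a
// Vector128<int> is emitted as "dup v0.4s, w0"; a zero init uses the zero register
// as the source instead.
//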
void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInit);
GenTree* op1 = simdNode->Op(1);
var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
genConsumeMultiOpOperands(simdNode);
regNumber op1Reg = op1->IsIntegralConst(0) ? REG_ZR : op1->GetRegNum();
// TODO-ARM64-CQ Add LD1R to allow SIMDIntrinsicInit from contained memory
// TODO-ARM64-CQ Add MOVI to allow SIMDIntrinsicInit from contained immediate small constants
assert(op1->isContained() == op1->IsIntegralConst(0));
assert(!op1->isUsedFromMemory());
assert(genIsValidFloatReg(targetReg));
assert(genIsValidIntReg(op1Reg) || genIsValidFloatReg(op1Reg));
emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
if (opt == INS_OPTS_1D)
{
GetEmitter()->emitIns_Mov(INS_mov, attr, targetReg, op1Reg, /* canSkip */ false);
}
else if (genIsValidIntReg(op1Reg))
{
GetEmitter()->emitIns_R_R(INS_dup, attr, targetReg, op1Reg, opt);
}
else
{
GetEmitter()->emitIns_R_R_I(INS_dup, attr, targetReg, op1Reg, 0, opt);
}
genProduceReg(simdNode);
}
//-------------------------------------------------------------------------------------------
// genSIMDIntrinsicInitN: Generate code for SIMD Intrinsic Initialize for the form that takes
// a number of arguments equal to the length of the Vector.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
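// Illustrative example (register numbers are arbitrary): a four-float init inserts
// each operand with "ins v16.s[i], v0.s[0]" into a temporary vector register, which
// is then moved to the target register.
//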
void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInitN);
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
var_types baseType = simdNode->GetSimdBaseType();
emitAttr baseTypeSize = emitTypeSize(baseType);
regNumber vectorReg = targetReg;
size_t initCount = simdNode->GetOperandCount();
assert((initCount * baseTypeSize) <= simdNode->GetSimdSize());
if (varTypeIsFloating(baseType))
{
// Note that we cannot use targetReg before consuming all float source operands.
// Therefore use an internal temp register
vectorReg = simdNode->GetSingleTempReg(RBM_ALLFLOAT);
}
// We will first consume the list items in execution (left to right) order,
// and record the registers.
regNumber operandRegs[FP_REGSIZE_BYTES];
for (size_t i = 1; i <= initCount; i++)
{
GenTree* operand = simdNode->Op(i);
assert(operand->TypeIs(baseType));
assert(!operand->isContained());
operandRegs[i - 1] = genConsumeReg(operand);
}
if (initCount * baseTypeSize < EA_16BYTE)
{
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, vectorReg, 0x00, INS_OPTS_16B);
}
if (varTypeIsIntegral(baseType))
{
for (unsigned i = 0; i < initCount; i++)
{
GetEmitter()->emitIns_R_R_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i);
}
}
else
{
for (unsigned i = 0; i < initCount; i++)
{
GetEmitter()->emitIns_R_R_I_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i, 0);
}
}
// Load the initialized value.
GetEmitter()->emitIns_Mov(INS_mov, EA_16BYTE, targetReg, vectorReg, /* canSkip */ true);
genProduceReg(simdNode);
}
//----------------------------------------------------------------------------------
// genSIMDIntrinsicUnOp: Generate code for SIMD Intrinsic unary operations (currently only the cast intrinsic).
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
void CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicCast);
GenTree* op1 = simdNode->Op(1);
var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
genConsumeMultiOpOperands(simdNode);
regNumber op1Reg = op1->GetRegNum();
assert(genIsValidFloatReg(op1Reg));
assert(genIsValidFloatReg(targetReg));
instruction ins = getOpForSIMDIntrinsic(simdNode->GetSIMDIntrinsicId(), baseType);
emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
if (GetEmitter()->IsMovInstruction(ins))
{
GetEmitter()->emitIns_Mov(ins, attr, targetReg, op1Reg, /* canSkip */ false, INS_OPTS_NONE);
}
else
{
GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, genGetSimdInsOpt(attr, baseType));
}
genProduceReg(simdNode);
}
//--------------------------------------------------------------------------------
// genSIMDIntrinsicBinOp: Generate code for SIMD Intrinsic binary operations:
// sub, bit-wise And, bit-wise Or, and Equal.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
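// Illustrative example (register numbers are arbitrary): a Vector128<float>
// subtraction is emitted as "fsub v0.4s, v1.4s, v2.4s".
//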
void CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
{
assert((simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicSub) ||
(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicBitwiseAnd) ||
(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicBitwiseOr) ||
(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicEqual));
GenTree* op1 = simdNode->Op(1);
GenTree* op2 = simdNode->Op(2);
var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
genConsumeMultiOpOperands(simdNode);
regNumber op1Reg = op1->GetRegNum();
regNumber op2Reg = op2->GetRegNum();
assert(genIsValidFloatReg(op1Reg));
assert(genIsValidFloatReg(op2Reg));
assert(genIsValidFloatReg(targetReg));
    // TODO-ARM64-CQ Contain integer constants where possible
instruction ins = getOpForSIMDIntrinsic(simdNode->GetSIMDIntrinsicId(), baseType);
emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
GetEmitter()->emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, opt);
genProduceReg(simdNode);
}
//-----------------------------------------------------------------------------
// genSIMDIntrinsicUpperSave: save the upper half of a TYP_SIMD16 vector to
// the given register, if any, or to memory.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
// Notes:
// The upper halves of all SIMD registers are volatile, even those of the callee-saved registers.
// When a 16-byte SIMD value is live across a call, the register allocator will use this intrinsic
// to cause the upper half to be saved. It will first attempt to find another, unused, callee-save
// register. If such a register cannot be found, it will save it to an available caller-save register.
// In that case, this node will be marked GTF_SPILL, which will cause this method to save
// the upper half to the lclVar's home location.
//
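// Illustrative example (register numbers are arbitrary): saving to another vector
// register is "mov v16.d[0], v8.d[1]"; in the spill case that half is then stored
// to the upper 8 bytes of the lclVar's home with an "str".
//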
void CodeGen::genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicUpperSave);
GenTree* op1 = simdNode->Op(1);
GenTreeLclVar* lclNode = op1->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(emitTypeSize(varDsc->GetRegisterType(lclNode)) == 16);
regNumber targetReg = simdNode->GetRegNum();
regNumber op1Reg = genConsumeReg(op1);
assert(op1Reg != REG_NA);
assert(targetReg != REG_NA);
GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, targetReg, op1Reg, 0, 1);
if ((simdNode->gtFlags & GTF_SPILL) != 0)
{
// This is not a normal spill; we'll spill it to the lclVar location.
// The localVar must have a stack home.
unsigned varNum = lclNode->GetLclNum();
assert(varDsc->lvOnFrame);
// We want to store this to the upper 8 bytes of this localVar's home.
int offset = 8;
emitAttr attr = emitTypeSize(TYP_SIMD8);
GetEmitter()->emitIns_S_R(INS_str, attr, targetReg, varNum, offset);
}
else
{
genProduceReg(simdNode);
}
}
//-----------------------------------------------------------------------------
// genSIMDIntrinsicUpperRestore: Restore the upper half of a TYP_SIMD16 vector from
// the register in which it was saved, or from memory if it was spilled.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
// Notes:
// For consistency with genSIMDIntrinsicUpperSave, and to ensure that lclVar nodes always
// have their home register, this node has its targetReg on the lclVar child, and its source
// on the simdNode.
// Regarding spill, please see the note above on genSIMDIntrinsicUpperSave. If we have spilled
// an upper-half to the lclVar's home location, this node will be marked GTF_SPILLED.
//
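// Illustrative example (register numbers are arbitrary): the restore is
// "mov v8.d[1], v16.d[0]", preceded by an "ldr" from the upper 8 bytes of the
// lclVar's home if the upper half was spilled.
//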
void CodeGen::genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicUpperRestore);
GenTree* op1 = simdNode->Op(1);
assert(op1->IsLocal());
GenTreeLclVar* lclNode = op1->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(emitTypeSize(varDsc->GetRegisterType(lclNode)) == 16);
regNumber srcReg = simdNode->GetRegNum();
regNumber lclVarReg = genConsumeReg(lclNode);
unsigned varNum = lclNode->GetLclNum();
assert(lclVarReg != REG_NA);
assert(srcReg != REG_NA);
if (simdNode->gtFlags & GTF_SPILLED)
{
// The localVar must have a stack home.
assert(varDsc->lvOnFrame);
// We will load this from the upper 8 bytes of this localVar's home.
int offset = 8;
emitAttr attr = emitTypeSize(TYP_SIMD8);
GetEmitter()->emitIns_R_S(INS_ldr, attr, srcReg, varNum, offset);
}
GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, lclVarReg, srcReg, 1, 0);
}
//-----------------------------------------------------------------------------
// genStoreIndTypeSIMD12: store indirect a TYP_SIMD12 (i.e. Vector3) to memory.
// Since Vector3 is not a hardware supported write size, it is performed
// as two writes: 8 byte followed by 4-byte.
//
// Arguments:
// treeNode - tree node that is attempting to store indirect
//
//
// Return Value:
// None.
//
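// The emitted sequence looks roughly like this (register numbers are illustrative):
//   str   d0, [x0]        ; store the lower 8 bytes
//   mov   w1, v0.s[2]     ; extract the upper 4 bytes into the temp register
//   str   w1, [x0, #8]    ; store the upper 4 bytes
//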
void CodeGen::genStoreIndTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_STOREIND);
GenTree* addr = treeNode->AsOp()->gtOp1;
GenTree* data = treeNode->AsOp()->gtOp2;
// addr and data should not be contained.
assert(!data->isContained());
assert(!addr->isContained());
#ifdef DEBUG
// Should not require a write barrier
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(treeNode, data);
assert(writeBarrierForm == GCInfo::WBF_NoBarrier);
#endif
genConsumeOperands(treeNode->AsOp());
// Need an additional integer register to extract upper 4 bytes from data.
regNumber tmpReg = treeNode->GetSingleTempReg();
assert(tmpReg != addr->GetRegNum());
// 8-byte write
GetEmitter()->emitIns_R_R(INS_str, EA_8BYTE, data->GetRegNum(), addr->GetRegNum());
// Extract upper 4-bytes from data
GetEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, tmpReg, data->GetRegNum(), 2);
// 4-byte write
GetEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, tmpReg, addr->GetRegNum(), 8);
}
//-----------------------------------------------------------------------------
// genLoadIndTypeSIMD12: load indirect a TYP_SIMD12 (i.e. Vector3) value.
// Since Vector3 is not a hardware supported load size, it is performed
// as two loads: 8 byte followed by 4-byte.
//
// Arguments:
// treeNode - tree node of GT_IND
//
//
// Return Value:
// None.
//
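// The emitted sequence looks roughly like this (register numbers are illustrative):
//   ldr   d0, [x0]        ; load the lower 8 bytes
//   ldr   w1, [x0, #8]    ; load the upper 4 bytes
//   mov   v0.s[2], w1     ; insert them as the third element
//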
void CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_IND);
GenTree* addr = treeNode->AsOp()->gtOp1;
regNumber targetReg = treeNode->GetRegNum();
assert(!addr->isContained());
regNumber operandReg = genConsumeReg(addr);
    // Need an additional int register to read upper 4 bytes, which is different from targetReg
regNumber tmpReg = treeNode->GetSingleTempReg();
// 8-byte read
GetEmitter()->emitIns_R_R(INS_ldr, EA_8BYTE, targetReg, addr->GetRegNum());
// 4-byte read
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, addr->GetRegNum(), 8);
// Insert upper 4-bytes into data
GetEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, targetReg, tmpReg, 2);
genProduceReg(treeNode);
}
//-----------------------------------------------------------------------------
// genStoreLclTypeSIMD12: store a TYP_SIMD12 (i.e. Vector3) type field.
// Since Vector3 is not a hardware supported write size, it is performed
// as two stores: 8 byte followed by 4-byte.
//
// Arguments:
// treeNode - tree node that is attempting to store TYP_SIMD12 field
//
// Return Value:
// None.
//
void CodeGen::genStoreLclTypeSIMD12(GenTree* treeNode)
{
assert((treeNode->OperGet() == GT_STORE_LCL_FLD) || (treeNode->OperGet() == GT_STORE_LCL_VAR));
GenTreeLclVarCommon* lclVar = treeNode->AsLclVarCommon();
unsigned offs = lclVar->GetLclOffs();
unsigned varNum = lclVar->GetLclNum();
assert(varNum < compiler->lvaCount);
GenTree* op1 = lclVar->gtGetOp1();
if (op1->isContained())
{
// This is only possible for a zero-init.
assert(op1->IsIntegralConst(0) || op1->IsSIMDZero());
// store lower 8 bytes
GetEmitter()->emitIns_S_R(ins_Store(TYP_DOUBLE), EA_8BYTE, REG_ZR, varNum, offs);
// Store upper 4 bytes
GetEmitter()->emitIns_S_R(ins_Store(TYP_FLOAT), EA_4BYTE, REG_ZR, varNum, offs + 8);
return;
}
regNumber operandReg = genConsumeReg(op1);
// Need an additional integer register to extract upper 4 bytes from data.
regNumber tmpReg = lclVar->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(varNum, offs, operandReg, tmpReg);
}
#endif // FEATURE_SIMD
#ifdef PROFILING_SUPPORTED
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed set to 'false' if 'initReg' is
// set to non-zero value after this call.
//
// Return Value:
// None
//
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
if (compiler->compProfilerMethHndIndirected)
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_FUNC_ID,
(ssize_t)compiler->compProfilerMethHnd);
GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_FUNC_ID, REG_PROFILER_ENTER_ARG_FUNC_ID);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_ENTER_ARG_FUNC_ID, (ssize_t)compiler->compProfilerMethHnd);
}
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_CALLER_SP, genFramePointerReg(),
(ssize_t)(-callerSPOffset), REG_PROFILER_ENTER_ARG_CALLER_SP);
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
if ((genRegMask(initReg) & RBM_PROFILER_ENTER_TRASH) != RBM_NONE)
{
*pInitRegZeroed = false;
}
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
if (compiler->compProfilerMethHndIndirected)
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, REG_PROFILER_LEAVE_ARG_FUNC_ID,
(ssize_t)compiler->compProfilerMethHnd);
GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_FUNC_ID, REG_PROFILER_LEAVE_ARG_FUNC_ID);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_FUNC_ID, (ssize_t)compiler->compProfilerMethHnd);
}
gcInfo.gcMarkRegSetNpt(RBM_PROFILER_LEAVE_ARG_FUNC_ID);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_CALLER_SP, genFramePointerReg(),
(ssize_t)(-callerSPOffset), REG_PROFILER_LEAVE_ARG_CALLER_SP);
gcInfo.gcMarkRegSetNpt(RBM_PROFILER_LEAVE_ARG_CALLER_SP);
genEmitHelperCall(helper, 0, EA_UNKNOWN);
}
#endif // PROFILING_SUPPORTED
/*****************************************************************************
* Unit testing of the ARM64 emitter: generate a bunch of instructions into the prolog
* (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
 * disassembler thinks the instructions are the same as we do.
*/
// Uncomment "#define ALL_ARM64_EMITTER_UNIT_TESTS" to run all the unit tests here.
// After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
//#define ALL_ARM64_EMITTER_UNIT_TESTS
#if defined(DEBUG)
void CodeGen::genArm64EmitterUnitTests()
{
if (!verbose)
{
return;
}
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// Mark the "fake" instructions in the output.
printf("*************** In genArm64EmitterUnitTests()\n");
emitter* theEmitter = GetEmitter();
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// We use this:
// genDefineTempLabel(genCreateTempLabel());
// to create artificial labels to help separate groups of tests.
//
// Loads/Stores basic general register
//
genDefineTempLabel(genCreateTempLabel());
// ldr/str Xt, [reg]
theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
// ldr/str Wt, [reg]
theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrsb, EA_4BYTE, REG_R8, REG_R9); // target Wt
theEmitter->emitIns_R_R(INS_ldrsh, EA_4BYTE, REG_R8, REG_R9); // target Wt
theEmitter->emitIns_R_R(INS_ldrsb, EA_8BYTE, REG_R8, REG_R9); // target Xt
theEmitter->emitIns_R_R(INS_ldrsh, EA_8BYTE, REG_R8, REG_R9); // target Xt
theEmitter->emitIns_R_R(INS_ldrsw, EA_8BYTE, REG_R8, REG_R9); // target Xt
theEmitter->emitIns_R_R_I(INS_ldurb, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldurh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sturb, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sturh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursb, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursb, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursh, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursw, EA_8BYTE, REG_R8, REG_R9, 1);
// SP and ZR tests
theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_SP, 1);
theEmitter->emitIns_R_R_I(INS_ldurb, EA_8BYTE, REG_ZR, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldurh, EA_8BYTE, REG_ZR, REG_SP, 1);
// scaled
theEmitter->emitIns_R_R_I(INS_ldrb, EA_1BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldrh, EA_2BYTE, REG_R8, REG_R9, 2);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 4);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 8);
// pre-/post-indexed (unscaled)
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
// ldar/stlr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldar, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldar, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldarb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldarh, EA_4BYTE, REG_R5, REG_R12);
theEmitter->emitIns_R_R(INS_stlr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_stlr, EA_4BYTE, REG_R7, REG_R13);
theEmitter->emitIns_R_R(INS_stlrb, EA_4BYTE, REG_R5, REG_R14);
theEmitter->emitIns_R_R(INS_stlrh, EA_4BYTE, REG_R3, REG_R15);
// ldapr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldapr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldapr, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldaprb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldaprh, EA_4BYTE, REG_R5, REG_R12);
// ldaxr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldaxr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldaxr, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldaxrb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldaxrh, EA_4BYTE, REG_R5, REG_R12);
// ldxr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldxr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldxr, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldxrb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldxrh, EA_4BYTE, REG_R5, REG_R12);
// stxr Ws, Rt, [reg]
theEmitter->emitIns_R_R_R(INS_stxr, EA_8BYTE, REG_R1, REG_R9, REG_R8);
theEmitter->emitIns_R_R_R(INS_stxr, EA_4BYTE, REG_R3, REG_R7, REG_R13);
theEmitter->emitIns_R_R_R(INS_stxrb, EA_4BYTE, REG_R8, REG_R5, REG_R14);
theEmitter->emitIns_R_R_R(INS_stxrh, EA_4BYTE, REG_R12, REG_R3, REG_R15);
// stlxr Ws, Rt, [reg]
theEmitter->emitIns_R_R_R(INS_stlxr, EA_8BYTE, REG_R1, REG_R9, REG_R8);
theEmitter->emitIns_R_R_R(INS_stlxr, EA_4BYTE, REG_R3, REG_R7, REG_R13);
theEmitter->emitIns_R_R_R(INS_stlxrb, EA_4BYTE, REG_R8, REG_R5, REG_R14);
theEmitter->emitIns_R_R_R(INS_stlxrh, EA_4BYTE, REG_R12, REG_R3, REG_R15);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
// ld1 {Vt}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V0, REG_R1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V2, REG_R3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V4, REG_R5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V6, REG_R7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V8, REG_R9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V10, REG_R11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V12, REG_R13, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V14, REG_R15, INS_OPTS_2D);
// ld1 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V18, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V21, REG_R23, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V24, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V28, REG_SP, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V30, REG_R2, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V3, REG_R7, INS_OPTS_2D);
// ld2 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld2, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld2, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld2, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V18, REG_R20, INS_OPTS_2D);
// ld3 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld3, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld3, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld3, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V24, REG_R27, INS_OPTS_2D);
// ld4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld4, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld4, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld4, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V30, REG_R2, INS_OPTS_2D);
// st1 {Vt}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V0, REG_R1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V2, REG_R3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V4, REG_R5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V6, REG_R7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V8, REG_R9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V10, REG_R11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V12, REG_R13, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V14, REG_R15, INS_OPTS_2D);
// st1 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V18, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V21, REG_R23, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V24, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V28, REG_SP, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V30, REG_R2, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V3, REG_R7, INS_OPTS_2D);
// st2 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st2, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st2, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st2, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V18, REG_R20, INS_OPTS_2D);
// st3 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st3, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st3, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st3, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V24, REG_R27, INS_OPTS_2D);
// st4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st4, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st4, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st4, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V30, REG_R2, INS_OPTS_2D);
// ld1r {Vt}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V0, REG_R1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V2, REG_R3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V4, REG_R5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V6, REG_R7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V8, REG_R9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V10, REG_R11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V12, REG_R13, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V14, REG_R15, INS_OPTS_2D);
// ld2r {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V18, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V21, REG_R23, INS_OPTS_2D);
// ld3r {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V24, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V28, REG_SP, INS_OPTS_2D);
// ld4r {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V30, REG_R2, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V3, REG_R7, INS_OPTS_2D);
// tbl Vd, {Vt}, Vm
theEmitter->emitIns_R_R_R(INS_tbl, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt}, Vm
theEmitter->emitIns_R_R_R(INS_tbx, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbl Vd, {Vt, Vt2}, Vm
theEmitter->emitIns_R_R_R(INS_tbl_2regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl_2regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt, Vt2}, Vm
theEmitter->emitIns_R_R_R(INS_tbx_2regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx_2regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbl Vd, {Vt, Vt2, Vt3}, Vm
theEmitter->emitIns_R_R_R(INS_tbl_3regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl_3regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt, Vt2, Vt3}, Vm
theEmitter->emitIns_R_R_R(INS_tbx_3regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx_3regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbl Vd, {Vt, Vt2, Vt3, Vt4}, Vm
theEmitter->emitIns_R_R_R(INS_tbl_4regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl_4regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt, Vt2, Vt3, Vt4}, Vm
theEmitter->emitIns_R_R_R(INS_tbx_4regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx_4regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
// ld1 {Vt}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V0, REG_R1, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V3, REG_R4, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V6, REG_R7, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V9, REG_R10, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V12, REG_R13, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V15, REG_R16, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V18, REG_R19, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V21, REG_R22, REG_R23, INS_OPTS_2D);
// ld1 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V28, REG_SP, REG_R30, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V2, REG_R5, REG_R6, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V10, REG_R14, REG_R15, INS_OPTS_2D);
// ld2 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld2, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld2, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld2, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_2D);
// ld3 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld3, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld3, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld3, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_2D);
// ld4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld4, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld4, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld4, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_2D);
// st1 {Vt}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V0, REG_R1, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V3, REG_R4, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V6, REG_R7, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V9, REG_R10, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V12, REG_R13, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V15, REG_R16, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V18, REG_R19, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V21, REG_R22, REG_R23, INS_OPTS_2D);
// st1 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V28, REG_SP, REG_R30, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V2, REG_R5, REG_R6, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V10, REG_R14, REG_R15, INS_OPTS_2D);
// st2 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st2, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st2, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st2, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_2D);
// st3 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st3, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st3, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st3, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_2D);
// st4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st4, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st4, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st4, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_2D);
// ld1r {Vt}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V0, REG_R1, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V3, REG_R4, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V6, REG_R7, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V9, REG_R10, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V12, REG_R13, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V15, REG_R16, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V18, REG_R19, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V21, REG_R22, REG_R23, INS_OPTS_2D);
// ld2r {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V28, REG_SP, REG_R30, INS_OPTS_2D);
// ld3r {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V2, REG_R5, REG_R6, INS_OPTS_2D);
// ld4r {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V10, REG_R14, REG_R15, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
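// Post-indexed by an immediate offset: multiple-structure ld1-ld4/st1-st4 plus the replicate ld1r-ld4r forms.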
// ld1 {Vt}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V0, REG_R1, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V2, REG_R3, 16, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V4, REG_R5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V6, REG_R7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V8, REG_R9, 8, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V10, REG_R11, 16, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V12, REG_R13, 8, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V14, REG_R15, 16, INS_OPTS_2D);
// ld1 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V18, REG_R20, 16, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V21, REG_R23, 32, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V24, REG_R27, 24, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V28, REG_SP, 48, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V30, REG_R2, 32, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V3, REG_R7, 64, INS_OPTS_2D);
// ld2 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V18, REG_R20, 32, INS_OPTS_2D);
// ld3 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V24, REG_R27, 48, INS_OPTS_2D);
// ld4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V30, REG_R2, 64, INS_OPTS_2D);
// st1 {Vt}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V0, REG_R1, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V2, REG_R3, 16, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V4, REG_R5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V6, REG_R7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V8, REG_R9, 8, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V10, REG_R11, 16, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V12, REG_R13, 8, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V14, REG_R15, 16, INS_OPTS_2D);
// st1 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V18, REG_R20, 16, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V21, REG_R23, 32, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V24, REG_R27, 24, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V28, REG_SP, 48, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V30, REG_R2, 32, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V3, REG_R7, 64, INS_OPTS_2D);
// st2 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V18, REG_R20, 32, INS_OPTS_2D);
// st3 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V24, REG_R27, 48, INS_OPTS_2D);
// st4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V30, REG_R2, 64, INS_OPTS_2D);
// ld1r {Vt}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V0, REG_R1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V2, REG_R3, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V4, REG_R5, 2, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V6, REG_R7, 2, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V8, REG_R9, 4, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V10, REG_R11, 4, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V12, REG_R13, 8, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V14, REG_R15, 8, INS_OPTS_2D);
// ld2r {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V0, REG_R2, 2, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V3, REG_R5, 2, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V6, REG_R8, 4, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V9, REG_R11, 4, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V12, REG_R14, 8, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V15, REG_R17, 8, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V18, REG_R20, 16, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V21, REG_R23, 16, INS_OPTS_2D);
// ld3r {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V0, REG_R3, 3, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V4, REG_R7, 3, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V8, REG_R11, 6, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V12, REG_R15, 6, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V16, REG_R19, 12, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V20, REG_R23, 12, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V24, REG_R27, 24, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V28, REG_SP, 24, INS_OPTS_2D);
// ld4r {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V0, REG_R4, 4, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V5, REG_R9, 4, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V10, REG_R14, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V15, REG_R19, 8, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V20, REG_R24, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V25, REG_R29, 16, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V30, REG_R2, 32, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V3, REG_R7, 32, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
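// Single-structure (per-lane) ld1-ld4/st1-st4 with a lane index and no writeback.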
// ld1 {Vt}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld1, EA_1BYTE, REG_V0, REG_R1, 3);
theEmitter->emitIns_R_R_I(INS_ld1, EA_2BYTE, REG_V2, REG_R3, 2);
theEmitter->emitIns_R_R_I(INS_ld1, EA_4BYTE, REG_V4, REG_R5, 1);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V6, REG_R7, 0);
// ld2 {Vt, Vt2}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld2, EA_1BYTE, REG_V0, REG_R2, 4);
theEmitter->emitIns_R_R_I(INS_ld2, EA_2BYTE, REG_V3, REG_R5, 3);
theEmitter->emitIns_R_R_I(INS_ld2, EA_4BYTE, REG_V6, REG_R8, 2);
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V9, REG_R11, 1);
// ld3 {Vt, Vt2, Vt3}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld3, EA_1BYTE, REG_V0, REG_R3, 5);
theEmitter->emitIns_R_R_I(INS_ld3, EA_2BYTE, REG_V4, REG_R7, 4);
theEmitter->emitIns_R_R_I(INS_ld3, EA_4BYTE, REG_V8, REG_R11, 3);
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V12, REG_R15, 0);
// ld4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld4, EA_1BYTE, REG_V0, REG_R4, 6);
theEmitter->emitIns_R_R_I(INS_ld4, EA_2BYTE, REG_V5, REG_R9, 5);
theEmitter->emitIns_R_R_I(INS_ld4, EA_4BYTE, REG_V10, REG_R14, 0);
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V15, REG_R19, 1);
// st1 {Vt}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st1, EA_1BYTE, REG_V0, REG_R1, 7);
theEmitter->emitIns_R_R_I(INS_st1, EA_2BYTE, REG_V2, REG_R3, 6);
theEmitter->emitIns_R_R_I(INS_st1, EA_4BYTE, REG_V4, REG_R5, 1);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V6, REG_R7, 0);
// st2 {Vt, Vt2}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st2, EA_1BYTE, REG_V0, REG_R2, 8);
theEmitter->emitIns_R_R_I(INS_st2, EA_2BYTE, REG_V3, REG_R5, 7);
theEmitter->emitIns_R_R_I(INS_st2, EA_4BYTE, REG_V6, REG_R8, 2);
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V9, REG_R11, 1);
// st3 {Vt, Vt2, Vt3}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st3, EA_1BYTE, REG_V0, REG_R3, 9);
theEmitter->emitIns_R_R_I(INS_st3, EA_2BYTE, REG_V4, REG_R7, 0);
theEmitter->emitIns_R_R_I(INS_st3, EA_4BYTE, REG_V8, REG_R11, 3);
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V12, REG_R15, 0);
// st4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st4, EA_1BYTE, REG_V0, REG_R4, 10);
theEmitter->emitIns_R_R_I(INS_st4, EA_2BYTE, REG_V5, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_st4, EA_4BYTE, REG_V10, REG_R14, 0);
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V15, REG_R19, 1);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
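// Single-structure (per-lane) forms, post-indexed by a register offset (Xm).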
// ld1 {Vt}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_1BYTE, REG_V0, REG_R1, REG_R2, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_2BYTE, REG_V3, REG_R4, REG_R5, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_4BYTE, REG_V6, REG_R7, REG_R8, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_8BYTE, REG_V9, REG_R10, REG_R11, 0, INS_OPTS_POST_INDEX);
// ld2 {Vt, Vt2}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_1BYTE, REG_V0, REG_R2, REG_R3, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_2BYTE, REG_V4, REG_R6, REG_R7, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_4BYTE, REG_V8, REG_R10, REG_R11, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_8BYTE, REG_V12, REG_R14, REG_R15, 1, INS_OPTS_POST_INDEX);
// ld3 {Vt, Vt2, Vt3}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_1BYTE, REG_V0, REG_R3, REG_R4, 5, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_2BYTE, REG_V5, REG_R8, REG_R9, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_4BYTE, REG_V10, REG_R13, REG_R14, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_8BYTE, REG_V15, REG_R18, REG_R19, 0, INS_OPTS_POST_INDEX);
// ld4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_1BYTE, REG_V0, REG_R4, REG_R5, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_2BYTE, REG_V6, REG_R10, REG_R11, 5, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_4BYTE, REG_V12, REG_R16, REG_R17, 0, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_8BYTE, REG_V18, REG_R22, REG_R23, 1, INS_OPTS_POST_INDEX);
// st1 {Vt}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st1, EA_1BYTE, REG_V0, REG_R1, REG_R2, 7, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st1, EA_2BYTE, REG_V3, REG_R4, REG_R5, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st1, EA_4BYTE, REG_V6, REG_R7, REG_R8, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st1, EA_8BYTE, REG_V9, REG_R10, REG_R11, 0, INS_OPTS_POST_INDEX);
// st2 {Vt, Vt2}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st2, EA_1BYTE, REG_V0, REG_R2, REG_R3, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st2, EA_2BYTE, REG_V4, REG_R6, REG_R7, 7, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st2, EA_4BYTE, REG_V8, REG_R10, REG_R11, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st2, EA_8BYTE, REG_V12, REG_R14, REG_R15, 1, INS_OPTS_POST_INDEX);
// st3 {Vt, Vt2, Vt3}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st3, EA_1BYTE, REG_V0, REG_R3, REG_R4, 9, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st3, EA_2BYTE, REG_V5, REG_R8, REG_R9, 0, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st3, EA_4BYTE, REG_V10, REG_R13, REG_R14, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st3, EA_8BYTE, REG_V15, REG_R18, REG_R19, 0, INS_OPTS_POST_INDEX);
// st4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st4, EA_1BYTE, REG_V0, REG_R4, REG_R5, 10, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st4, EA_2BYTE, REG_V6, REG_R10, REG_R11, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st4, EA_4BYTE, REG_V12, REG_R16, REG_R17, 0, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st4, EA_8BYTE, REG_V18, REG_R22, REG_R23, 1, INS_OPTS_POST_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
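// Single-structure (per-lane) forms, post-indexed by an immediate offset.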
// ld1 {Vt}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_1BYTE, REG_V0, REG_R1, 3, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_2BYTE, REG_V2, REG_R3, 2, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_4BYTE, REG_V4, REG_R5, 1, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_8BYTE, REG_V6, REG_R7, 0, 8, INS_OPTS_POST_INDEX);
// ld2 {Vt, Vt2}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_1BYTE, REG_V0, REG_R2, 4, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_2BYTE, REG_V3, REG_R5, 3, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_4BYTE, REG_V6, REG_R8, 2, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_8BYTE, REG_V9, REG_R11, 1, 16, INS_OPTS_POST_INDEX);
// ld3 {Vt, Vt2, Vt3}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_1BYTE, REG_V0, REG_R3, 5, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_2BYTE, REG_V4, REG_R7, 4, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_4BYTE, REG_V8, REG_R11, 3, 12, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_8BYTE, REG_V12, REG_R15, 0, 24, INS_OPTS_POST_INDEX);
// ld4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_1BYTE, REG_V0, REG_R4, 6, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_2BYTE, REG_V5, REG_R9, 5, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_4BYTE, REG_V10, REG_R14, 0, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_8BYTE, REG_V15, REG_R19, 1, 32, INS_OPTS_POST_INDEX);
// st1 {Vt}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st1, EA_1BYTE, REG_V0, REG_R1, 3, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st1, EA_2BYTE, REG_V2, REG_R3, 2, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st1, EA_4BYTE, REG_V4, REG_R5, 1, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st1, EA_8BYTE, REG_V6, REG_R7, 0, 8, INS_OPTS_POST_INDEX);
// st2 {Vt, Vt2}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st2, EA_1BYTE, REG_V0, REG_R2, 4, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st2, EA_2BYTE, REG_V3, REG_R5, 3, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st2, EA_4BYTE, REG_V6, REG_R8, 2, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st2, EA_8BYTE, REG_V9, REG_R11, 1, 16, INS_OPTS_POST_INDEX);
// st3 {Vt, Vt2, Vt3}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st3, EA_1BYTE, REG_V0, REG_R3, 5, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st3, EA_2BYTE, REG_V4, REG_R7, 4, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st3, EA_4BYTE, REG_V8, REG_R11, 3, 12, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st3, EA_8BYTE, REG_V12, REG_R15, 0, 24, INS_OPTS_POST_INDEX);
// st4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st4, EA_1BYTE, REG_V0, REG_R4, 6, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st4, EA_2BYTE, REG_V5, REG_R9, 5, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st4, EA_4BYTE, REG_V10, REG_R14, 0, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st4, EA_8BYTE, REG_V15, REG_R19, 1, 32, INS_OPTS_POST_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Compares
//
genDefineTempLabel(genCreateTempLabel());
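// cmp/cmn with register operands and with 12-bit immediates (optionally shifted by 12), including negative immediates.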
// cmp reg, reg
theEmitter->emitIns_R_R(INS_cmp, EA_8BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_cmn, EA_8BYTE, REG_R8, REG_R9);
// cmp reg, imm
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 0);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 4095);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 1 << 12);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 4095 << 12);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 0);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 4095);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 1 << 12);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 4095 << 12);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, -1);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, -0xfff);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 0xfffffffffffff000LL);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 0xffffffffff800000LL);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, -1);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, -0xfff);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 0xfffffffffffff000LL);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 0xffffffffff800000LL);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R
//
genDefineTempLabel(genCreateTempLabel());
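// Bit/byte manipulation: cls, clz, rbit, rev, rev16 and rev32 in 64-bit and 32-bit register forms.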
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_R1, REG_R12);
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_R2, REG_R13);
theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_R3, REG_R14);
theEmitter->emitIns_R_R(INS_rev, EA_8BYTE, REG_R4, REG_R15);
theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_R5, REG_R0);
theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_R6, REG_R1);
theEmitter->emitIns_R_R(INS_cls, EA_4BYTE, REG_R7, REG_R2);
theEmitter->emitIns_R_R(INS_clz, EA_4BYTE, REG_R8, REG_R3);
theEmitter->emitIns_R_R(INS_rbit, EA_4BYTE, REG_R9, REG_R4);
theEmitter->emitIns_R_R(INS_rev, EA_4BYTE, REG_R10, REG_R5);
theEmitter->emitIns_R_R(INS_rev16, EA_4BYTE, REG_R11, REG_R6);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I
//
genDefineTempLabel(genCreateTempLabel());
// mov reg, imm(i16,hw)
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000000001234);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000043210000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000567800000000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765000000000000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFFFFFF1234);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFF4321FFFF);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFF5678FFFFFFFF);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765FFFFFFFFFFFF);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00001234);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x87650000);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xFFFF1234);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x4567FFFF);
// mov reg, imm(N,r,s)
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x6666666666666666);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_SP, 0x7FFF00007FFF0000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x5555555555555555);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xE003E003E003E003);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0707070707070707);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00FFFFF0);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x66666666);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x03FFC000);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x55555555);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xE003E003);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x07070707);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0xE003E003E003E003);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x6666666666666666);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x0707070707070707);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x7FFF00007FFF0000);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x5555555555555555);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xE003E003);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x00FFFFF0);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x66666666);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x07070707);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xFFF00000);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x55555555);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R
//
genDefineTempLabel(genCreateTempLabel());
// tst reg, reg
theEmitter->emitIns_R_R(INS_tst, EA_8BYTE, REG_R7, REG_R10);
// mov reg, reg
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_R8, REG_SP, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_SP, REG_R9, /* canSkip */ false);
theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_R4, REG_R12);
theEmitter->emitIns_R_R(INS_negs, EA_8BYTE, REG_R3, REG_R13);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_R_R(INS_mvn, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_neg, EA_4BYTE, REG_R4, REG_R12);
theEmitter->emitIns_R_R(INS_negs, EA_4BYTE, REG_R3, REG_R13);
theEmitter->emitIns_Mov(INS_sxtb, EA_8BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_sxth, EA_8BYTE, REG_R5, REG_R11, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_sxtw, EA_8BYTE, REG_R4, REG_R12, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_uxtb, EA_8BYTE, REG_R3, REG_R13, /* canSkip */ false); // map to Wt
theEmitter->emitIns_Mov(INS_uxth, EA_8BYTE, REG_R2, REG_R14, /* canSkip */ false); // map to Wt
theEmitter->emitIns_Mov(INS_sxtb, EA_4BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_sxth, EA_4BYTE, REG_R5, REG_R11, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_uxtb, EA_4BYTE, REG_R3, REG_R13, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_uxth, EA_4BYTE, REG_R2, REG_R14, /* canSkip */ false);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I_I
//
genDefineTempLabel(genCreateTempLabel());
// mov reg, imm(i16,hw)
theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x1234, 0, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movk, EA_8BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movn, EA_8BYTE, REG_R8, 0x5678, 32, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movz, EA_8BYTE, REG_R8, 0x8765, 48, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movk, EA_4BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movn, EA_4BYTE, REG_R8, 0x5678, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movz, EA_4BYTE, REG_R8, 0x8765, 16, INS_OPTS_LSL);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I
//
genDefineTempLabel(genCreateTempLabel());
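// Immediate shifts and rotates, logical ops with bitmask immediates, and add/sub/adds/subs with 12-bit (optionally shifted) immediates.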
theEmitter->emitIns_R_R_I(INS_lsl, EA_8BYTE, REG_R0, REG_R0, 1);
theEmitter->emitIns_R_R_I(INS_lsl, EA_4BYTE, REG_R9, REG_R3, 18);
theEmitter->emitIns_R_R_I(INS_lsr, EA_8BYTE, REG_R7, REG_R0, 37);
theEmitter->emitIns_R_R_I(INS_lsr, EA_4BYTE, REG_R0, REG_R1, 2);
theEmitter->emitIns_R_R_I(INS_asr, EA_8BYTE, REG_R2, REG_R3, 53);
theEmitter->emitIns_R_R_I(INS_asr, EA_4BYTE, REG_R9, REG_R3, 18);
theEmitter->emitIns_R_R_I(INS_and, EA_8BYTE, REG_R2, REG_R3, 0x5555555555555555);
theEmitter->emitIns_R_R_I(INS_ands, EA_8BYTE, REG_R1, REG_R5, 0x6666666666666666);
theEmitter->emitIns_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, 0x0707070707070707);
theEmitter->emitIns_R_R_I(INS_orr, EA_8BYTE, REG_SP, REG_R3, 0xFFFC000000000000);
theEmitter->emitIns_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, 0xE003E003);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 31);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 32);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 63);
theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 31);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I cmp/tst
//
// cmp
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0);
// CMP (shifted register)
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
// TST (shifted register)
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 34, INS_OPTS_ROR);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 24, INS_OPTS_ROR);
// CMP (extended register)
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTH);
// Note: msdis disassembles "cmp x8, x9, UXTW" as just "cmp x8,x9", which looks like an msdis issue.
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTX);
// CMP 64-bit (extended register) and left shift
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTX);
// CMP 32-bit (extended register) and left shift
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTW);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R
//
genDefineTempLabel(genCreateTempLabel());
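// Variable shifts and rotates, add/subtract with carry, divides, and multiplies (including widening and high-half forms).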
theEmitter->emitIns_R_R_R(INS_lsl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ror, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_udiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sdiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mneg, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smnegl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umnegl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lslv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_rorv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ror, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_udiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sdiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mul, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mneg, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smnegl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umnegl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lslv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_rorv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// ARMv8.1 LSE Atomics
//
genDefineTempLabel(genCreateTempLabel());
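// Compare-and-swap (cas*), load-add (ldadd*), ldclral/ldsetal, swap (swp*) and store-add (stadd*) encodings
// in byte, halfword, word and doubleword sizes with acquire/release variants.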
theEmitter->emitIns_R_R_R(INS_casb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casab, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casalb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_caslb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_cash, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casah, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casalh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_caslh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_cas, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casa, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_cas, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casa, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddab, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddalb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddlb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddah, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddalh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddlh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadd, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadda, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadd, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadda, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldclral, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldclral, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldsetal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldsetal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpab, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpalb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swplb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swph, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpah, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpalh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swplh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swp, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpa, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swp, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpa, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R(INS_staddb, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddlb, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddh, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddlh, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_stadd, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddl, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_stadd, EA_8BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddl, EA_8BYTE, REG_R8, REG_R10);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I_I
//
genDefineTempLabel(genCreateTempLabel());
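// Bitfield moves (sbfm/bfm/ubfm) and their insert/extract aliases (sbfiz/bfi/ubfiz, sbfx/bfxil/ubfx).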
theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_8BYTE, REG_R2, REG_R3, 4, 39);
theEmitter->emitIns_R_R_I_I(INS_bfm, EA_8BYTE, REG_R1, REG_R5, 20, 23);
theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_8BYTE, REG_R8, REG_R9, 36, 7);
theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_8BYTE, REG_R2, REG_R3, 7, 37);
theEmitter->emitIns_R_R_I_I(INS_bfi, EA_8BYTE, REG_R1, REG_R5, 23, 21);
theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_8BYTE, REG_R8, REG_R9, 39, 5);
theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_8BYTE, REG_R2, REG_R3, 10, 24);
theEmitter->emitIns_R_R_I_I(INS_bfxil, EA_8BYTE, REG_R1, REG_R5, 26, 16);
theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_8BYTE, REG_R8, REG_R9, 42, 8);
theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_4BYTE, REG_R2, REG_R3, 4, 19);
theEmitter->emitIns_R_R_I_I(INS_bfm, EA_4BYTE, REG_R1, REG_R5, 10, 13);
theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_4BYTE, REG_R8, REG_R9, 16, 7);
theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_4BYTE, REG_R2, REG_R3, 5, 17);
theEmitter->emitIns_R_R_I_I(INS_bfi, EA_4BYTE, REG_R1, REG_R5, 13, 11);
theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_4BYTE, REG_R8, REG_R9, 19, 5);
theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, REG_R2, REG_R3, 3, 14);
theEmitter->emitIns_R_R_I_I(INS_bfxil, EA_4BYTE, REG_R1, REG_R5, 11, 9);
theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_4BYTE, REG_R8, REG_R9, 22, 8);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_I
//
genDefineTempLabel(genCreateTempLabel());
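// add/sub with extended-register or shifted-register operands, extr (extract from a register pair), and shifted-register logical ops.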
// ADD (extended register)
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
// ADD (extended register) and left shift
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
// ADD (shifted register)
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 31, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 32, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 33, INS_OPTS_ASR);
// EXTR (extract field from register pair)
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 1);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 31);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 32);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 63);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 1);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 31);
// SUB (extended register)
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
// SUB (extended register) and left shift
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
// SUB (shifted register)
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 27, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 28, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 29, INS_OPTS_ASR);
// bit operations
theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_I -- load/store pair
//
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
// SP and ZR tests
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SP, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_R8, 16, INS_OPTS_PRE_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_Ext -- load/store shifted/extend
//
genDefineTempLabel(genCreateTempLabel());
// LDR (register)
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
// STR (register)
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_R
//
genDefineTempLabel(genCreateTempLabel());
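// multiply-add/subtract and their widening signed/unsigned forms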
theEmitter->emitIns_R_R_R_R(INS_madd, EA_4BYTE, REG_R0, REG_R12, REG_R27, REG_R10);
theEmitter->emitIns_R_R_R_R(INS_msub, EA_4BYTE, REG_R1, REG_R13, REG_R28, REG_R11);
theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_4BYTE, REG_R2, REG_R14, REG_R0, REG_R12);
theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_4BYTE, REG_R3, REG_R15, REG_R1, REG_R13);
theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_4BYTE, REG_R4, REG_R19, REG_R2, REG_R14);
theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_4BYTE, REG_R5, REG_R20, REG_R3, REG_R15);
theEmitter->emitIns_R_R_R_R(INS_madd, EA_8BYTE, REG_R6, REG_R21, REG_R4, REG_R19);
theEmitter->emitIns_R_R_R_R(INS_msub, EA_8BYTE, REG_R7, REG_R22, REG_R5, REG_R20);
theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_8BYTE, REG_R8, REG_R23, REG_R6, REG_R21);
theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_8BYTE, REG_R9, REG_R24, REG_R7, REG_R22);
theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_8BYTE, REG_R10, REG_R25, REG_R8, REG_R23);
theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_8BYTE, REG_R11, REG_R26, REG_R9, REG_R24);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_COND
//
// cset reg, cond
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R9, INS_COND_EQ); // eq
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R8, INS_COND_NE); // ne
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R7, INS_COND_HS); // hs
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R6, INS_COND_LO); // lo
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R5, INS_COND_MI); // mi
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R4, INS_COND_PL); // pl
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R3, INS_COND_VS); // vs
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R2, INS_COND_VC); // vc
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R1, INS_COND_HI); // hi
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R0, INS_COND_LS); // ls
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R9, INS_COND_GE); // ge
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R8, INS_COND_LT); // lt
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R7, INS_COND_GT); // gt
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R6, INS_COND_LE); // le
// csetm reg, cond
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R9, INS_COND_EQ); // eq
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R8, INS_COND_NE); // ne
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R7, INS_COND_HS); // hs
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R6, INS_COND_LO); // lo
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R5, INS_COND_MI); // mi
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R4, INS_COND_PL); // pl
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R3, INS_COND_VS); // vs
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R2, INS_COND_VC); // vc
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R1, INS_COND_HI); // hi
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R0, INS_COND_LS); // ls
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R9, INS_COND_GE); // ge
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R8, INS_COND_LT); // lt
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R7, INS_COND_GT); // gt
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R6, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_COND
//
// cinc reg, reg, cond
// cinv reg, reg, cond
// cneg reg, reg, cond
theEmitter->emitIns_R_R_COND(INS_cinc, EA_8BYTE, REG_R0, REG_R4, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R1, REG_R5, INS_COND_NE); // ne
theEmitter->emitIns_R_R_COND(INS_cneg, EA_4BYTE, REG_R2, REG_R6, INS_COND_HS); // hs
theEmitter->emitIns_R_R_COND(INS_cinc, EA_8BYTE, REG_R3, REG_R7, INS_COND_LO); // lo
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R4, REG_R8, INS_COND_MI); // mi
theEmitter->emitIns_R_R_COND(INS_cneg, EA_8BYTE, REG_R5, REG_R9, INS_COND_PL); // pl
theEmitter->emitIns_R_R_COND(INS_cinc, EA_8BYTE, REG_R6, REG_R0, INS_COND_VS); // vs
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R7, REG_R1, INS_COND_VC); // vc
theEmitter->emitIns_R_R_COND(INS_cneg, EA_8BYTE, REG_R8, REG_R2, INS_COND_HI); // hi
theEmitter->emitIns_R_R_COND(INS_cinc, EA_4BYTE, REG_R9, REG_R3, INS_COND_LS); // ls
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R0, REG_R4, INS_COND_GE); // ge
theEmitter->emitIns_R_R_COND(INS_cneg, EA_8BYTE, REG_R2, REG_R5, INS_COND_LT); // lt
theEmitter->emitIns_R_R_COND(INS_cinc, EA_4BYTE, REG_R2, REG_R6, INS_COND_GT); // gt
theEmitter->emitIns_R_R_COND(INS_cinv, EA_8BYTE, REG_R3, REG_R7, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_COND
//
// csel reg, reg, reg, cond
// csinc reg, reg, reg, cond
// csinv reg, reg, reg, cond
// csneg reg, reg, reg, cond
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R0, REG_R4, REG_R8, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_4BYTE, REG_R1, REG_R5, REG_R9, INS_COND_NE); // ne
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_4BYTE, REG_R2, REG_R6, REG_R0, INS_COND_HS); // hs
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_8BYTE, REG_R3, REG_R7, REG_R1, INS_COND_LO); // lo
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R4, REG_R8, REG_R2, INS_COND_MI); // mi
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, REG_R5, REG_R9, REG_R3, INS_COND_PL); // pl
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_8BYTE, REG_R6, REG_R0, REG_R4, INS_COND_VS); // vs
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_4BYTE, REG_R7, REG_R1, REG_R5, INS_COND_VC); // vc
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R8, REG_R2, REG_R6, INS_COND_HI); // hi
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_4BYTE, REG_R9, REG_R3, REG_R7, INS_COND_LS); // ls
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_4BYTE, REG_R0, REG_R4, REG_R8, INS_COND_GE); // ge
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_8BYTE, REG_R2, REG_R5, REG_R9, INS_COND_LT); // lt
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R2, REG_R6, REG_R0, INS_COND_GT); // gt
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, REG_R3, REG_R7, REG_R1, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_FLAGS_COND
//
// ccmp reg1, reg2, nzcv, cond
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, REG_R1, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, REG_R2, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmp reg1, imm, nzcv, cond
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, 13, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmp reg1, imm, nzcv, cond -- encoded as ccmn
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, -3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, -2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, -1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, -5, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, -31, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, -28, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, -25, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, -22, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, -19, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, -16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, -13, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, -10, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, -7, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, -4, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmn reg1, reg2, nzcv, cond
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R9, REG_R1, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R6, REG_R2, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmn reg1, imm, nzcv, cond
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R9, 13, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Branch to register
//
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R(INS_br, EA_PTRSIZE, REG_R8);
theEmitter->emitIns_R(INS_ret, EA_PTRSIZE, REG_R8);
theEmitter->emitIns_R(INS_ret, EA_PTRSIZE, REG_LR);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Misc
//
genDefineTempLabel(genCreateTempLabel());
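// brk -- breakpoint with the minimum and maximum 16-bit immediates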
theEmitter->emitIns_I(INS_brk, EA_PTRSIZE, 0);
theEmitter->emitIns_I(INS_brk, EA_PTRSIZE, 65535);
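// memory barriers (dsb/dmb/isb) with assorted barrier options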
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_OSHLD);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_OSHST);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_OSH);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_NSHLD);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_NSHST);
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_NSH);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_ISHLD);
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_ISHST);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_ISH);
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_LD);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_ST);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_SY);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
////////////////////////////////////////////////////////////////////////////////
//
// SIMD and Floating point
//
////////////////////////////////////////////////////////////////////////////////
//
// Load/Stores vector register
//
genDefineTempLabel(genCreateTempLabel());
// ldr/str Vt, [reg]
theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_V1, REG_R9);
theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_V2, REG_R8);
theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_V3, REG_R7);
theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_V4, REG_R6);
theEmitter->emitIns_R_R(INS_ldr, EA_2BYTE, REG_V5, REG_R5);
theEmitter->emitIns_R_R(INS_str, EA_2BYTE, REG_V6, REG_R4);
theEmitter->emitIns_R_R(INS_ldr, EA_1BYTE, REG_V7, REG_R3);
theEmitter->emitIns_R_R(INS_str, EA_1BYTE, REG_V8, REG_R2);
theEmitter->emitIns_R_R(INS_ldr, EA_16BYTE, REG_V9, REG_R1);
theEmitter->emitIns_R_R(INS_str, EA_16BYTE, REG_V10, REG_R0);
// ldr/str Vt, [reg+cns] -- scaled
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 2);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 4);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 8);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 16);
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V7, REG_R10, 1);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V7, REG_R10, 2);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V7, REG_R10, 4);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V7, REG_R10, 8);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V7, REG_R10, 16);
// ldr/str Vt, [reg],cns -- post-indexed (unscaled)
// ldr/str Vt, [reg+cns]! -- pre-indexed (unscaled)
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
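// ldur/stur Vt, [reg+cns] -- unscaled offset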
theEmitter->emitIns_R_R_I(INS_ldur, EA_1BYTE, REG_V8, REG_R9, 2);
theEmitter->emitIns_R_R_I(INS_ldur, EA_2BYTE, REG_V8, REG_R9, 3);
theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_V8, REG_R9, 5);
theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_V8, REG_R9, 9);
theEmitter->emitIns_R_R_I(INS_ldur, EA_16BYTE, REG_V8, REG_R9, 17);
theEmitter->emitIns_R_R_I(INS_stur, EA_1BYTE, REG_V7, REG_R10, 2);
theEmitter->emitIns_R_R_I(INS_stur, EA_2BYTE, REG_V7, REG_R10, 3);
theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_V7, REG_R10, 5);
theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_V7, REG_R10, 9);
theEmitter->emitIns_R_R_I(INS_stur, EA_16BYTE, REG_V7, REG_R10, 17);
// load/store pair
theEmitter->emitIns_R_R_R(INS_ldnp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V1, REG_V2, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_V2, REG_V3, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 24);
theEmitter->emitIns_R_R_R(INS_ldnp, EA_4BYTE, REG_V4, REG_V5, REG_SP);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 4);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V7, REG_V8, REG_SP, 12);
theEmitter->emitIns_R_R_R(INS_ldnp, EA_16BYTE, REG_V8, REG_V9, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V9, REG_V10, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_16BYTE, REG_V10, REG_V11, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V11, REG_V12, REG_R10, 48);
theEmitter->emitIns_R_R_R(INS_ldp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V1, REG_V2, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V2, REG_V3, REG_SP, 8);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V4, REG_V5, REG_R10, 24, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V5, REG_V6, REG_SP, 32, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V6, REG_V7, REG_SP, 40, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V7, REG_V8, REG_R10, 48, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R(INS_ldp, EA_4BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V1, REG_V2, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V2, REG_V3, REG_SP, 4);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V3, REG_V4, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V4, REG_V5, REG_R10, 12, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 20, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V7, REG_V8, REG_R10, 24, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R(INS_ldp, EA_16BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V1, REG_V2, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V2, REG_V3, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V3, REG_V4, REG_R10, 32);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V4, REG_V5, REG_R10, 48, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V5, REG_V6, REG_SP, 64, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V6, REG_V7, REG_SP, 80, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V7, REG_V8, REG_R10, 96, INS_OPTS_PRE_INDEX);
// LDR (register)
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V1, REG_R7, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V2, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V4, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_UXTX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R mov and aliases for mov
//
// mov vector to vector
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_V0, REG_V1, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_16BYTE, REG_V2, REG_V3, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_V12, REG_V13, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_2BYTE, REG_V14, REG_V15, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_1BYTE, REG_V16, REG_V17, /* canSkip */ false);
// mov vector to general
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_R0, REG_V4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_R1, REG_V5, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_2BYTE, REG_R2, REG_V6, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_1BYTE, REG_R3, REG_V7, /* canSkip */ false);
// mov general to vector
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_V8, REG_R4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_V9, REG_R5, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_2BYTE, REG_V10, REG_R6, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_1BYTE, REG_V11, REG_R7, /* canSkip */ false);
// mov vector[index] to vector
theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V2, REG_V3, 3);
theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V4, REG_V5, 7);
theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V6, REG_V7, 15);
// mov to general from vector[index]
theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_R8, REG_V16, 1);
theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_R9, REG_V17, 2);
theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_R10, REG_V18, 3);
theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_R11, REG_V19, 4);
// mov to vector[index] from general
theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V20, REG_R12, 1);
theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V21, REG_R13, 2);
theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V22, REG_R14, 6);
theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V23, REG_R15, 8);
// mov vector[index] to vector[index2]
theEmitter->emitIns_R_R_I_I(INS_mov, EA_8BYTE, REG_V8, REG_V9, 1, 0);
theEmitter->emitIns_R_R_I_I(INS_mov, EA_4BYTE, REG_V10, REG_V11, 2, 1);
theEmitter->emitIns_R_R_I_I(INS_mov, EA_2BYTE, REG_V12, REG_V13, 5, 2);
theEmitter->emitIns_R_R_I_I(INS_mov, EA_1BYTE, REG_V14, REG_V15, 12, 3);
//////////////////////////////////////////////////////////////////////////////////
// mov/dup scalar
theEmitter->emitIns_R_R_I(INS_dup, EA_8BYTE, REG_V24, REG_V25, 1);
theEmitter->emitIns_R_R_I(INS_dup, EA_4BYTE, REG_V26, REG_V27, 3);
theEmitter->emitIns_R_R_I(INS_dup, EA_2BYTE, REG_V28, REG_V29, 7);
theEmitter->emitIns_R_R_I(INS_dup, EA_1BYTE, REG_V30, REG_V31, 15);
// mov/ins vector element
theEmitter->emitIns_R_R_I_I(INS_ins, EA_8BYTE, REG_V0, REG_V1, 0, 1);
theEmitter->emitIns_R_R_I_I(INS_ins, EA_4BYTE, REG_V2, REG_V3, 2, 2);
theEmitter->emitIns_R_R_I_I(INS_ins, EA_2BYTE, REG_V4, REG_V5, 4, 3);
theEmitter->emitIns_R_R_I_I(INS_ins, EA_1BYTE, REG_V6, REG_V7, 8, 4);
// umov to general from vector element
theEmitter->emitIns_R_R_I(INS_umov, EA_8BYTE, REG_R0, REG_V8, 1);
theEmitter->emitIns_R_R_I(INS_umov, EA_4BYTE, REG_R1, REG_V9, 2);
theEmitter->emitIns_R_R_I(INS_umov, EA_2BYTE, REG_R2, REG_V10, 4);
theEmitter->emitIns_R_R_I(INS_umov, EA_1BYTE, REG_R3, REG_V11, 8);
// ins to vector element from general
theEmitter->emitIns_R_R_I(INS_ins, EA_8BYTE, REG_V12, REG_R4, 1);
theEmitter->emitIns_R_R_I(INS_ins, EA_4BYTE, REG_V13, REG_R5, 3);
theEmitter->emitIns_R_R_I(INS_ins, EA_2BYTE, REG_V14, REG_R6, 7);
theEmitter->emitIns_R_R_I(INS_ins, EA_1BYTE, REG_V15, REG_R7, 15);
// smov to general from vector element
theEmitter->emitIns_R_R_I(INS_smov, EA_4BYTE, REG_R5, REG_V17, 2);
theEmitter->emitIns_R_R_I(INS_smov, EA_2BYTE, REG_R6, REG_V18, 4);
theEmitter->emitIns_R_R_I(INS_smov, EA_1BYTE, REG_R7, REG_V19, 8);
// ext extract vector from pair of vectors
theEmitter->emitIns_R_R_R_I(INS_ext, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R_I(INS_ext, EA_8BYTE, REG_V4, REG_V5, REG_V6, 7, INS_OPTS_8B);
theEmitter->emitIns_R_R_R_I(INS_ext, EA_16BYTE, REG_V8, REG_V9, REG_V10, 11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R_I(INS_ext, EA_16BYTE, REG_V12, REG_V13, REG_V14, 15, INS_OPTS_16B);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I movi and mvni
//
// movi imm8 (vector)
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V0, 0x00, INS_OPTS_8B);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V1, 0xFF, INS_OPTS_8B);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V2, 0x00, INS_OPTS_16B);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V3, 0xFF, INS_OPTS_16B);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V4, 0x007F, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V5, 0x7F00, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V6, 0x003F, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V7, 0x3F00, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V8, 0x1F, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V9, 0x1F00, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V10, 0x1F0000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V11, 0x1F000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V12, 0x1FFF, INS_OPTS_2S); // MSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V13, 0x1FFFFF, INS_OPTS_2S); // MSL 16
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V14, 0x37, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V15, 0x3700, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V16, 0x370000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V17, 0x37000000, INS_OPTS_4S); // LSL 24
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V18, 0x37FF, INS_OPTS_4S); // MSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V19, 0x37FFFF, INS_OPTS_4S); // MSL 16
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V20, 0xFF80, INS_OPTS_4H); // mvni
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V21, 0xFFC0, INS_OPTS_8H); // mvni
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V22, 0xFFFFFFE0, INS_OPTS_2S); // mvni
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V23, 0xFFFFF0FF, INS_OPTS_4S); // mvni LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V24, 0xFFF8FFFF, INS_OPTS_2S); // mvni LSL 16
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V25, 0xFCFFFFFF, INS_OPTS_4S); // mvni LSL 24
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V26, 0xFFFFFE00, INS_OPTS_2S); // mvni MSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V27, 0xFFFC0000, INS_OPTS_4S); // mvni MSL 16
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V28, 0x00FF00FF00FF00FF, INS_OPTS_1D);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V29, 0x00FFFF0000FFFF00, INS_OPTS_2D);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V30, 0xFF000000FF000000);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V31, 0x0, INS_OPTS_2D);
// Regression: movi immediates equal to int.MaxValue or int.MaxValue / 2 were previously not encoded.
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V16, 0x7fffffff, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V16, 0x3fffffff, INS_OPTS_2S);
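// mvni imm8 (vector)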
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V8, 0x42FF, INS_OPTS_2S); // MSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V9, 0x42FFFF, INS_OPTS_2S); // MSL 16
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V14, 0x5DFF, INS_OPTS_4S); // MSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V15, 0x5DFFFF, INS_OPTS_4S); // MSL 16
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I orr/bic vector immediate
//
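// orr imm8 (vector)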
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
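// bic imm8 (vector)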
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_F cmp/fmov immediate
//
// fmov imm8 (scalar)
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V14, 1.0);
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V15, -1.0);
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V0, 2.0); // encodes imm8 == 0
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V16, 10.0);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V17, -10.0);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V18, 31); // Largest encodable value
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V19, -31);
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V20, 1.25);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V21, -1.25);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V22, 0.125); // Smallest encodable value
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V23, -0.125);
// fmov imm8 (vector)
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V0, 2.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V24, 1.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V25, 1.0, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V26, 1.0, INS_OPTS_2D);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V27, -10.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V28, -10.0, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V29, -10.0, INS_OPTS_2D);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V30, 31.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V31, 31.0, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V0, 31.0, INS_OPTS_2D);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V1, -0.125, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V2, -0.125, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V3, -0.125, INS_OPTS_2D);
// fcmp with 0.0
theEmitter->emitIns_R_F(INS_fcmp, EA_8BYTE, REG_V12, 0.0);
theEmitter->emitIns_R_F(INS_fcmp, EA_4BYTE, REG_V13, 0.0);
theEmitter->emitIns_R_F(INS_fcmpe, EA_8BYTE, REG_V14, 0.0);
theEmitter->emitIns_R_F(INS_fcmpe, EA_4BYTE, REG_V15, 0.0);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R cmeq/fmov/fcmp/fcvt
//
// cmeq scalar
theEmitter->emitIns_R_R(INS_cmeq, EA_8BYTE, REG_V0, REG_V1);
// fmov vector to vector
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_V0, REG_V2, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_V1, REG_V3, /* canSkip */ false);
// fmov vector to general
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_R0, REG_V4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_R1, REG_V5, /* canSkip */ false);
// using the optional conversion specifier
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_R2, REG_V6, /* canSkip */ false, INS_OPTS_D_TO_8BYTE);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_R3, REG_V7, /* canSkip */ false, INS_OPTS_S_TO_4BYTE);
// fmov general to vector
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_V8, REG_R4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_V9, REG_R5, /* canSkip */ false);
// using the optional conversion specifier
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_V11, REG_R7, /* canSkip */ false, INS_OPTS_4BYTE_TO_S);
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_V10, REG_R6, /* canSkip */ false, INS_OPTS_8BYTE_TO_D);
// fcmp/fcmpe
theEmitter->emitIns_R_R(INS_fcmp, EA_8BYTE, REG_V8, REG_V16);
theEmitter->emitIns_R_R(INS_fcmp, EA_4BYTE, REG_V9, REG_V17);
theEmitter->emitIns_R_R(INS_fcmpe, EA_8BYTE, REG_V10, REG_V18);
theEmitter->emitIns_R_R(INS_fcmpe, EA_4BYTE, REG_V11, REG_V19);
// fcvt
theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V24, REG_V25, INS_OPTS_S_TO_D); // Single to Double
theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V26, REG_V27, INS_OPTS_D_TO_S); // Double to Single
theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V1, REG_V2, INS_OPTS_H_TO_S);
theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V3, REG_V4, INS_OPTS_H_TO_D);
theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V5, REG_V6, INS_OPTS_S_TO_H);
theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V7, REG_V8, INS_OPTS_D_TO_H);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R floating point conversions
//
// fcvtas scalar
theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V2, REG_V3);
// fcvtas scalar to general
theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtas vector
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtas, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtas, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtau scalar
theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V2, REG_V3);
// fcvtau scalar to general
theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtau vector
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtau, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtau, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtms scalar
theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V2, REG_V3);
// fcvtms scalar to general
theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtms vector
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtms, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtms, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtmu scalar
theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V2, REG_V3);
// fcvtmu scalar to general
theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtmu vector
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtns scalar
theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V2, REG_V3);
// fcvtns scalar to general
theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtns vector
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtns, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtns, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtnu scalar
theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V2, REG_V3);
// fcvtnu scalar to general
theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtnu vector
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtps scalar
theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V2, REG_V3);
// fcvtps scalar to general
theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtps vector
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtps, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtps, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtpu scalar
theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V2, REG_V3);
// fcvtpu scalar to general
theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtpu vector
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtzs scalar
theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V2, REG_V3);
// fcvtzs scalar to general
theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtzs vector
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtzu scalar
theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V2, REG_V3);
// fcvtzu scalar to general
theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtzu vector
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// scvtf scalar
theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V2, REG_V3);
// scvtf scalar from general
theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
// scvtf vector
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_scvtf, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_scvtf, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// ucvtf scalar
theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V2, REG_V3);
// ucvtf scalar from general
theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
// ucvtf vector
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R floating point operations, one dest, one source
//
// fabs scalar
theEmitter->emitIns_R_R(INS_fabs, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V2, REG_V3);
// fabs vector
theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fmaxp scalar
theEmitter->emitIns_R_R(INS_fmaxp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fmaxp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fmaxnmp scalar
theEmitter->emitIns_R_R(INS_fmaxnmp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fmaxnmp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fmaxnmv vector
theEmitter->emitIns_R_R(INS_fmaxnmv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fmaxv vector
theEmitter->emitIns_R_R(INS_fmaxv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fminp scalar
theEmitter->emitIns_R_R(INS_fminp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fminp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fminnmp scalar
theEmitter->emitIns_R_R(INS_fminnmp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fminnmp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fminnmv vector
theEmitter->emitIns_R_R(INS_fminnmv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fminv vector
theEmitter->emitIns_R_R(INS_fminv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fneg scalar
theEmitter->emitIns_R_R(INS_fneg, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V2, REG_V3);
// fneg vector
theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fsqrt scalar
theEmitter->emitIns_R_R(INS_fsqrt, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V2, REG_V3);
// fsqrt vector
theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fsqrt, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fsqrt, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// faddp scalar
theEmitter->emitIns_R_R(INS_faddp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_faddp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fcmeq Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmeq, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmeq, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmge Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmge, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmge, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmgt Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmgt, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmgt, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmle Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmle, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmle, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmlt Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmlt, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmlt, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// frecpe scalar
theEmitter->emitIns_R_R(INS_frecpe, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_frecpe, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
theEmitter->emitIns_R_R(INS_frecpe, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frecpe, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frecpe, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frecpx scalar
theEmitter->emitIns_R_R(INS_frecpx, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frecpx, EA_8BYTE, REG_V2, REG_V3);
// frsqrte
theEmitter->emitIns_R_R(INS_frsqrte, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_frsqrte, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
theEmitter->emitIns_R_R(INS_frsqrte, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frsqrte, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frsqrte, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fcvtl{2} vector
theEmitter->emitIns_R_R(INS_fcvtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_fcvtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_fcvtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtl2, EA_16BYTE, REG_V5, REG_V6, INS_OPTS_4S);
// fcvtn{2} vector
theEmitter->emitIns_R_R(INS_fcvtn, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_fcvtn2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_fcvtn, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtn2, EA_16BYTE, REG_V5, REG_V6, INS_OPTS_4S);
// fcvtxn scalar
theEmitter->emitIns_R_R(INS_fcvtxn, EA_4BYTE, REG_V0, REG_V1);
// fcvtxn{2} vector
theEmitter->emitIns_R_R(INS_fcvtxn, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtxn2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
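//
// R_R   integer operations, one dest, one source (scalar and vector)
//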
genDefineTempLabel(genCreateTempLabel());
// abs scalar
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V2, REG_V3);
// abs vector
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
// addv vector
theEmitter->emitIns_R_R(INS_addv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_addv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_addv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_addv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_addv, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// cnt vector
theEmitter->emitIns_R_R(INS_cnt, EA_8BYTE, REG_V22, REG_V23, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_cnt, EA_16BYTE, REG_V24, REG_V25, INS_OPTS_16B);
// cls vector
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// clz vector
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// mvn vector
theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V4, REG_V5);
theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V8, REG_V9);
theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_16B);
// neg scalar
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V2, REG_V3);
// neg vector
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
// not vector (the same encoding as mvn)
theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V12, REG_V13);
theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V14, REG_V15, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V16, REG_V17);
theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V18, REG_V19, INS_OPTS_16B);
// rbit vector
theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rbit, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
// rev16 vector
theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rev16, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
// rev32 vector
theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rev32, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_rev32, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
// rev64 vector
theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// sadalp vector
theEmitter->emitIns_R_R(INS_sadalp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sadalp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sadalp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sadalp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sadalp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sadalp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// saddlp vector
theEmitter->emitIns_R_R(INS_saddlp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_saddlp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_saddlp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_saddlp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_saddlp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_saddlp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// saddlv vector
theEmitter->emitIns_R_R(INS_saddlv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_saddlv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_saddlv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_saddlv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_saddlv, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// smaxv vector
theEmitter->emitIns_R_R(INS_smaxv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_smaxv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_smaxv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_smaxv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_smaxv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// sminv vector
theEmitter->emitIns_R_R(INS_sminv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sminv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sminv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sminv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sminv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// sqabs scalar
theEmitter->emitIns_R_R(INS_sqabs, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqabs, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqabs, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// sqabs vector
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// sqneg scalar
theEmitter->emitIns_R_R(INS_sqneg, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqneg, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqneg, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// sqneg vector
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// sqxtn scalar
theEmitter->emitIns_R_R(INS_sqxtn, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtn, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtn, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
// sqxtn vector
theEmitter->emitIns_R_R(INS_sqxtn, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqxtn, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqxtn, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// sqxtn2 vector
theEmitter->emitIns_R_R(INS_sqxtn2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqxtn2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqxtn2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
// sqxtun scalar
theEmitter->emitIns_R_R(INS_sqxtun, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtun, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtun, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
// sqxtun vector
theEmitter->emitIns_R_R(INS_sqxtun, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqxtun, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqxtun, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// sqxtun2 vector
theEmitter->emitIns_R_R(INS_sqxtun2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqxtun2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqxtun2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
// suqadd scalar
theEmitter->emitIns_R_R(INS_suqadd, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_suqadd, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_suqadd, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// suqadd vector
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// uadalp vector
theEmitter->emitIns_R_R(INS_uadalp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uadalp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uadalp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_uadalp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uadalp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uadalp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// uaddlp vector
theEmitter->emitIns_R_R(INS_uaddlp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uaddlp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uaddlp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_uaddlp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uaddlp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uaddlp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// uaddlv vector
theEmitter->emitIns_R_R(INS_uaddlv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uaddlv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uaddlv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uaddlv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uaddlv, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// umaxv vector
theEmitter->emitIns_R_R(INS_umaxv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_umaxv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_umaxv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_umaxv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_umaxv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// uminv vector
theEmitter->emitIns_R_R(INS_uminv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uminv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uminv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uminv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uminv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// uqxtn scalar
theEmitter->emitIns_R_R(INS_uqxtn, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_uqxtn, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_uqxtn, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
// uqxtn vector
theEmitter->emitIns_R_R(INS_uqxtn, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uqxtn, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uqxtn, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// uqxtn2 vector
theEmitter->emitIns_R_R(INS_uqxtn2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uqxtn2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uqxtn2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
// urecpe vector
theEmitter->emitIns_R_R(INS_urecpe, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_urecpe, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_4S);
// ursqrte vector
theEmitter->emitIns_R_R(INS_ursqrte, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ursqrte, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_4S);
// usqadd scalar
theEmitter->emitIns_R_R(INS_usqadd, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_usqadd, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_usqadd, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// usqadd vector
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// xtn vector
theEmitter->emitIns_R_R(INS_xtn, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_xtn, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_xtn, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// xtn2 vector
theEmitter->emitIns_R_R(INS_xtn2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_xtn2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_xtn2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R floating point round to int, one dest, one source
//
// frinta scalar
theEmitter->emitIns_R_R(INS_frinta, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V2, REG_V3);
// frinta vector
theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frinta, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frinta, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frinti scalar
theEmitter->emitIns_R_R(INS_frinti, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V2, REG_V3);
// frinti vector
theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frinti, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frinti, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintm scalar
theEmitter->emitIns_R_R(INS_frintm, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V2, REG_V3);
// frintm vector
theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintm, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintm, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintn scalar
theEmitter->emitIns_R_R(INS_frintn, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V2, REG_V3);
// frintn vector
theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintn, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintn, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintp scalar
theEmitter->emitIns_R_R(INS_frintp, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V2, REG_V3);
// frintp vector
theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintx scalar
theEmitter->emitIns_R_R(INS_frintx, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V2, REG_V3);
// frintx vector
theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintx, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintx, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintz scalar
theEmitter->emitIns_R_R(INS_frintz, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V2, REG_V3);
// frintz vector
theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintz, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintz, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R floating point operations, one dest, two source
//
genDefineTempLabel(genCreateTempLabel());
// fadd
theEmitter->emitIns_R_R_R(INS_fadd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fsub
theEmitter->emitIns_R_R_R(INS_fsub, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fdiv
theEmitter->emitIns_R_R_R(INS_fdiv, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fdiv, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fdiv, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fmax
theEmitter->emitIns_R_R_R(INS_fmax, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmax, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fmaxp
theEmitter->emitIns_R_R_R(INS_fmaxp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmaxp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmaxp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fmaxnm
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fmaxnmp vector
theEmitter->emitIns_R_R_R(INS_fmaxnmp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmaxnmp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmaxnmp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fmin
theEmitter->emitIns_R_R_R(INS_fmin, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmin, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fminp
theEmitter->emitIns_R_R_R(INS_fminp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fminp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fminp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fminnm
theEmitter->emitIns_R_R_R(INS_fminnm, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fminnm, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fminnm, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fminnm, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fminnm, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fminnmp vector
theEmitter->emitIns_R_R_R(INS_fminnmp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fminnmp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fminnmp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fabd
theEmitter->emitIns_R_R_R(INS_fabd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fabd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// frecps
theEmitter->emitIns_R_R_R(INS_frecps, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_frecps, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_frecps, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_frecps, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_frecps, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// frsqrts
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
genDefineTempLabel(genCreateTempLabel());
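// fmul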
theEmitter->emitIns_R_R_R(INS_fmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmul, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
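// fmulx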
theEmitter->emitIns_R_R_R(INS_fmulx, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmulx, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmulx, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
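// fnmul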
theEmitter->emitIns_R_R_R(INS_fnmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fnmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I vector operations, one dest, one source reg, one immed
//
// Some of the test cases below might appear redundant since they emit the same combinations of instruction x size x
// vector arrangement. However, they are included to verify that the split constant encoding works with both small
// and large constants.
genDefineTempLabel(genCreateTempLabel());
// sshr scalar
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 64);
// sshr vector
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// ssra scalar
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 64);
// ssra vector
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// srshr scalar
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 64);
// srshr vector
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// srsra scalar
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 64);
// srsra vector
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// shl scalar
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 0);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 63);
// shl vector
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 0, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V12, REG_V13, 32, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// ushr scalar
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 64);
// ushr vector
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// usra scalar
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 64);
// usra vector
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// urshr scalar
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 64);
// urshr vector
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// ursra scalar
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 64);
// ursra vector
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// sri scalar
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 64);
// sri vector
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// sli scalar
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 0);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 63);
// sli vector
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 0, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V12, REG_V13, 32, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// sshll{2} vector
theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// ushll{2} vector
theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// shrn{2} vector
theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
// rshrn{2} vector
theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
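// Note on the groups above: the "{2}" forms (sshll2, ushll2, shrn2, rshrn2) read from or write to
// the upper 64 bits of the 128-bit register, so they are emitted with EA_16BYTE, while the base
// forms use the low 64 bits. The immediates step through each arrangement's encodable shift range
// (observation from the values used here, not from disassembler output).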
// sxtl{2} vector
theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// uxtl{2} vector
theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// sqrshrn scalar
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// sqrshrn{2} vector
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// sqrshrun scalar
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_1BYTE, REG_V0, REG_V1, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_2BYTE, REG_V2, REG_V3, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_2BYTE, REG_V2, REG_V3, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_4BYTE, REG_V4, REG_V5, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_4BYTE, REG_V4, REG_V5, 32, INS_OPTS_NONE);
// sqrshrun{2} vector
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// sqshl scalar
theEmitter->emitIns_R_R_I(INS_sqshl, EA_1BYTE, REG_V0, REG_V1, 0, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_1BYTE, REG_V2, REG_V3, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_2BYTE, REG_V4, REG_V5, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_2BYTE, REG_V6, REG_V7, 15, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_4BYTE, REG_V8, REG_V9, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_4BYTE, REG_V10, REG_V11, 31, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V12, REG_V13, 32, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V14, REG_V15, 63, INS_OPTS_NONE);
// sqshl vector
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V12, REG_V13, 63, INS_OPTS_2D);
// sqshlu scalar
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_1BYTE, REG_V0, REG_V1, 0, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_1BYTE, REG_V2, REG_V3, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_2BYTE, REG_V4, REG_V5, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_2BYTE, REG_V6, REG_V7, 15, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_4BYTE, REG_V8, REG_V9, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_4BYTE, REG_V10, REG_V11, 31, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V12, REG_V13, 32, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V14, REG_V15, 63, INS_OPTS_NONE);
// sqshlu vector
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V12, REG_V13, 63, INS_OPTS_2D);
// sqshrn scalar
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// sqshrn{2} vector
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// sqshrun scalar
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// sqshrun{2} vector
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// uqrshrn scalar
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// uqrshrn{2} vector
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// uqshl scalar
theEmitter->emitIns_R_R_I(INS_uqshl, EA_1BYTE, REG_V0, REG_V1, 0, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_1BYTE, REG_V2, REG_V3, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_2BYTE, REG_V4, REG_V5, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_2BYTE, REG_V6, REG_V7, 15, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_4BYTE, REG_V8, REG_V9, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_4BYTE, REG_V10, REG_V11, 31, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V12, REG_V13, 32, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V14, REG_V15, 63, INS_OPTS_NONE);
// uqshl vector
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V12, REG_V13, 63, INS_OPTS_2D);
// uqshrn scalar
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// uqshrn{2} vector
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
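// In the saturating narrowing-shift tests above, the scalar forms use EA_1BYTE/EA_2BYTE/EA_4BYTE to
// select the destination element size (the source element is twice as wide), while the vector forms
// pair each low-half instruction with its "2" (upper-half) counterpart.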
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R vector operations, one dest, two source
//
genDefineTempLabel(genCreateTempLabel());
// Specifying an Arrangement is optional
//
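// (Assumption, not verified here: when no insOpts is supplied, the emitter derives the arrangement
// from the size attribute -- 8B for EA_8BYTE and 16B for EA_16BYTE -- which is why the block that
// follows repeats the same instructions with the arrangement spelled out explicitly.)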
theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8);
theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11);
theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14);
theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17);
theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20);
theEmitter->emitIns_R_R_R(INS_and, EA_16BYTE, REG_V21, REG_V22, REG_V23);
theEmitter->emitIns_R_R_R(INS_bic, EA_16BYTE, REG_V24, REG_V25, REG_V26);
theEmitter->emitIns_R_R_R(INS_eor, EA_16BYTE, REG_V27, REG_V28, REG_V29);
theEmitter->emitIns_R_R_R(INS_orr, EA_16BYTE, REG_V30, REG_V31, REG_V0);
theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3);
theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6);
theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9);
theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12);
theEmitter->emitIns_R_R_R(INS_bsl, EA_16BYTE, REG_V13, REG_V14, REG_V15);
theEmitter->emitIns_R_R_R(INS_bit, EA_16BYTE, REG_V16, REG_V17, REG_V18);
theEmitter->emitIns_R_R_R(INS_bif, EA_16BYTE, REG_V19, REG_V20, REG_V21);
// Default Arrangement as per the ARM64 manual
//
theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_and, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bic, EA_16BYTE, REG_V24, REG_V25, REG_V26, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_eor, EA_16BYTE, REG_V27, REG_V28, REG_V29, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_orr, EA_16BYTE, REG_V30, REG_V31, REG_V0, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bsl, EA_16BYTE, REG_V13, REG_V14, REG_V15, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bit, EA_16BYTE, REG_V16, REG_V17, REG_V18, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bif, EA_16BYTE, REG_V19, REG_V20, REG_V21, INS_OPTS_16B);
genDefineTempLabel(genCreateTempLabel());
// add
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V0, REG_V1, REG_V2); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_2D);
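// For reference (hand-derived, not checked against the disassembler): the last call above is
// expected to encode as "add v21.2d, v22.2d, v23.2d", i.e. reg1 is the destination and the
// insOpts value selects the ".<T>" arrangement suffix.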
// addp
theEmitter->emitIns_R_R(INS_addp, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_2D); // scalar 16BYTE
theEmitter->emitIns_R_R_R(INS_addp, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_addp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_addp, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_2D);
// sub
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V1, REG_V2, REG_V3); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V13, REG_V14, REG_V15, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V16, REG_V17, REG_V18, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V19, REG_V20, REG_V21, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V22, REG_V23, REG_V24, INS_OPTS_2D);
genDefineTempLabel(genCreateTempLabel());
// saba vector
theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sabd vector
theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uaba vector
theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uabd vector
theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// sdot vector
theEmitter->emitIns_R_R_R(INS_sdot, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sdot, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
// smax vector
theEmitter->emitIns_R_R_R(INS_smax, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smax, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smax, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_smax, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smaxp vector
theEmitter->emitIns_R_R_R(INS_smaxp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smin vector
theEmitter->emitIns_R_R_R(INS_smin, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smin, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smin, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_smin, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sminp vector
theEmitter->emitIns_R_R_R(INS_sminp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sminp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sminp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sminp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sminp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sminp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// udot vector
theEmitter->emitIns_R_R_R(INS_udot, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_udot, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
// umax vector
theEmitter->emitIns_R_R_R(INS_umax, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umax, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umax, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_umax, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umaxp vector
theEmitter->emitIns_R_R_R(INS_umaxp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umin vector
theEmitter->emitIns_R_R_R(INS_umin, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umin, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umin, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_umin, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uminp vector
theEmitter->emitIns_R_R_R(INS_uminp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uminp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uminp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uminp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uminp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uminp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// cmeq vector
theEmitter->emitIns_R_R_R(INS_cmeq, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmge vector
theEmitter->emitIns_R_R_R(INS_cmge, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmge, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmge, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmgt vector
theEmitter->emitIns_R_R_R(INS_cmgt, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmhi vector
theEmitter->emitIns_R_R_R(INS_cmhi, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmhs vector
theEmitter->emitIns_R_R_R(INS_cmhs, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmtst vector
theEmitter->emitIns_R_R_R(INS_cmtst, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// faddp vector
theEmitter->emitIns_R_R_R(INS_faddp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_faddp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_faddp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
// fcmeq vector
theEmitter->emitIns_R_R_R(INS_fcmeq, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fcmeq, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fcmeq, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
// fcmge vector
theEmitter->emitIns_R_R_R(INS_fcmge, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fcmge, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fcmge, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
// fcmgt vector
theEmitter->emitIns_R_R_R(INS_fcmgt, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fcmgt, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fcmgt, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// trn1 vector
theEmitter->emitIns_R_R_R(INS_trn1, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_trn1, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_trn1, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// trn2 vector
theEmitter->emitIns_R_R_R(INS_trn2, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_trn2, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_trn2, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uzp1 vector
theEmitter->emitIns_R_R_R(INS_uzp1, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uzp2 vector
theEmitter->emitIns_R_R_R(INS_uzp2, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// zip1 vector
theEmitter->emitIns_R_R_R(INS_zip1, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_zip1, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_zip1, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// zip2 vector
theEmitter->emitIns_R_R_R(INS_zip2, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_zip2, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_zip2, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// srshl scalar
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// srshl vector
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// sshl scalar
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// sshl vector
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// urshl scalar
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// urshl vector
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// ushl scalar
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// ushl vector
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
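// The register-based shifts (srshl/sshl/urshl/ushl) only have a 64-bit (D-register) scalar form in
// the base ISA, which is presumably why the INS_OPTS_NONE cases above are emitted with EA_8BYTE only.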
// addhn vector
theEmitter->emitIns_R_R_R(INS_addhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_addhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_addhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// addhn2 vector
theEmitter->emitIns_R_R_R(INS_addhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_addhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_addhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// raddhn vector
theEmitter->emitIns_R_R_R(INS_raddhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_raddhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_raddhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// raddhn2 vector
theEmitter->emitIns_R_R_R(INS_raddhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_raddhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_raddhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// rsubhn vector
theEmitter->emitIns_R_R_R(INS_rsubhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_rsubhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_rsubhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// rsubhn2 vector
theEmitter->emitIns_R_R_R(INS_rsubhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_rsubhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_rsubhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sabal vector
theEmitter->emitIns_R_R_R(INS_sabal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sabal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sabal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// sabal2 vector
theEmitter->emitIns_R_R_R(INS_sabal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sabal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sabal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sabdl vector
theEmitter->emitIns_R_R_R(INS_sabdl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sabdl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sabdl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// sabdl2 vector
theEmitter->emitIns_R_R_R(INS_sabdl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sabdl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sabdl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// saddl vector
theEmitter->emitIns_R_R_R(INS_saddl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_saddl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_saddl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// saddl2 vector
theEmitter->emitIns_R_R_R(INS_saddl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_saddl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_saddl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// saddw vector
theEmitter->emitIns_R_R_R(INS_saddw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_saddw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_saddw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// saddw2 vector
theEmitter->emitIns_R_R_R(INS_saddw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_saddw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_saddw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// shadd vector
theEmitter->emitIns_R_R_R(INS_shadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_shadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_shadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_shadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_shadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_shadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// shsub vector
theEmitter->emitIns_R_R_R(INS_shsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_shsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_shsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_shsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_shsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_shsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sqadd scalar
theEmitter->emitIns_R_R_R(INS_sqadd, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqadd vector
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sqrshl scalar
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqrshl vector
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// sqshl scalar
theEmitter->emitIns_R_R_R(INS_sqshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqshl vector
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// sqsub scalar
theEmitter->emitIns_R_R_R(INS_sqsub, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqsub vector
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// srhadd vector
theEmitter->emitIns_R_R_R(INS_srhadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// ssubl vector
theEmitter->emitIns_R_R_R(INS_ssubl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ssubl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ssubl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// ssubl2 vector
theEmitter->emitIns_R_R_R(INS_ssubl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ssubl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ssubl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// ssubw vector
theEmitter->emitIns_R_R_R(INS_ssubw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ssubw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ssubw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// ssubw2 vector
theEmitter->emitIns_R_R_R(INS_ssubw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ssubw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ssubw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// subhn vector
theEmitter->emitIns_R_R_R(INS_subhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_subhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_subhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// sqdmlal scalar
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmlal vector
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
// sqdmlal2 vector
theEmitter->emitIns_R_R_R(INS_sqdmlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqdmlsl scalar
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmlsl vector
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
// sqdmlsl2 vector
theEmitter->emitIns_R_R_R(INS_sqdmlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqdmulh scalar
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmulh vector
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqdmull scalar
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmull vector
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
// sqdmull2 vector
theEmitter->emitIns_R_R_R(INS_sqdmull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
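// sqdmlal/sqdmlsl/sqdmulh/sqdmull are architecturally defined only for H and S elements, hence the
// scalar cases above are limited to EA_2BYTE/EA_4BYTE and the vector cases to the 4H/8H/2S/4S
// arrangements (statement based on the ARM64 ISA reference, not on this file).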
// sqrdmlah scalar
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqrdmlah vector
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqrdmlsh scalar
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqrdmlsh vector
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqrdmulh scalar
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqrdmulh vector
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// subhn2 vector
theEmitter->emitIns_R_R_R(INS_subhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_subhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_subhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uabal vector
theEmitter->emitIns_R_R_R(INS_uabal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uabal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uabal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uabal2 vector
theEmitter->emitIns_R_R_R(INS_uabal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uabal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uabal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uabdl vector
theEmitter->emitIns_R_R_R(INS_uabdl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uabdl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uabdl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uabdl2 vector
theEmitter->emitIns_R_R_R(INS_uabdl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uabdl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uabdl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uaddl vector
theEmitter->emitIns_R_R_R(INS_uaddl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uaddl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uaddl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uaddl2 vector
theEmitter->emitIns_R_R_R(INS_uaddl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uaddl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uaddl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uaddw vector
theEmitter->emitIns_R_R_R(INS_uaddw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uaddw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uaddw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uaddw2 vector
theEmitter->emitIns_R_R_R(INS_uaddw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uaddw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uaddw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uhadd vector
theEmitter->emitIns_R_R_R(INS_uhadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uhsub vector
theEmitter->emitIns_R_R_R(INS_uhsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uqadd scalar
theEmitter->emitIns_R_R_R(INS_uqadd, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqadd vector
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uqrshl scalar
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqrshl vector
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uqshl scalar
theEmitter->emitIns_R_R_R(INS_uqshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqshl vector
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uqsub scalar
theEmitter->emitIns_R_R_R(INS_uqsub, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqsub vector
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// urhadd vector
theEmitter->emitIns_R_R_R(INS_urhadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// usubl vector
theEmitter->emitIns_R_R_R(INS_usubl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_usubl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_usubl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// usubl2 vector
theEmitter->emitIns_R_R_R(INS_usubl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_usubl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_usubl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// usubw vector
theEmitter->emitIns_R_R_R(INS_usubw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_usubw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_usubw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// usubw2 vector
theEmitter->emitIns_R_R_R(INS_usubw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_usubw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_usubw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R vector multiply
//
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_pmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_pmul, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_16B);
// 'mul' vector by element
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
// 'mla' vector by element
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
// 'mls' vector by element
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// pmull vector
theEmitter->emitIns_R_R_R(INS_pmull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_pmull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_1D);
// pmull2 vector
theEmitter->emitIns_R_R_R(INS_pmull2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_pmull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2D);
// sdot vector
theEmitter->emitIns_R_R_R_I(INS_sdot, EA_8BYTE, REG_V0, REG_V1, REG_V16, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sdot, EA_16BYTE, REG_V3, REG_V4, REG_V31, 1, INS_OPTS_4S);
// smlal vector
theEmitter->emitIns_R_R_R(INS_smlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smlal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// smlal2 vector
theEmitter->emitIns_R_R_R(INS_smlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smlal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smlal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smlsl vector
theEmitter->emitIns_R_R_R(INS_smlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smlsl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// smlsl2 vector
theEmitter->emitIns_R_R_R(INS_smlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smlsl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smlsl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smull vector
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// smull2 vector
theEmitter->emitIns_R_R_R(INS_smull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smull2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smull2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// udot vector
theEmitter->emitIns_R_R_R_I(INS_udot, EA_8BYTE, REG_V0, REG_V1, REG_V16, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_udot, EA_16BYTE, REG_V3, REG_V4, REG_V31, 1, INS_OPTS_4S);
// umlal vector
theEmitter->emitIns_R_R_R(INS_umlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umlal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// umlal2 vector
theEmitter->emitIns_R_R_R(INS_umlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umlal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umlal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umlsl vector
theEmitter->emitIns_R_R_R(INS_umlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umlsl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// umlsl2 vector
theEmitter->emitIns_R_R_R(INS_umlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umlsl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umlsl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umull vector
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// umull2 vector
theEmitter->emitIns_R_R_R(INS_umull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umull2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umull2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smlal vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_smlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// smlal2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_smlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// smlsl vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_smlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// smlsl2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_smlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// smull vector, by element
theEmitter->emitIns_R_R_R_I(INS_smull, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_smull, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// smull2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_smull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_smull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmlal scalar, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmlal vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
// sqdmlal2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmlsl scalar, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmlsl vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
// sqdmlsl2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmulh scalar
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmulh vector
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmull scalar, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmull vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
// sqdmull2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqrdmlah scalar
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqrdmlah vector
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqrdmlsh scalar
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqrdmlsh vector
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqrdmulh scalar
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqrdmulh vector
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// umlal vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_umlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// umlal2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_umlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// umlsl vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
// umlsl2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_umlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// umull vector, by element
theEmitter->emitIns_R_R_R_I(INS_umull, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_umull, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// umull2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_umull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_umull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R floating point operations, one source/dest, and two source
//
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R_R_R(INS_fmla, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmla, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmla, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
theEmitter->emitIns_R_R_R(INS_fmls, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmls, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmls, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_R floating point operations, one dest, and three source
//
theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_4BYTE, REG_V0, REG_V8, REG_V16, REG_V24);
theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_4BYTE, REG_V1, REG_V9, REG_V17, REG_V25);
theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_4BYTE, REG_V2, REG_V10, REG_V18, REG_V26);
theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_4BYTE, REG_V3, REG_V11, REG_V19, REG_V27);
theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_8BYTE, REG_V4, REG_V12, REG_V20, REG_V28);
theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_8BYTE, REG_V5, REG_V13, REG_V21, REG_V29);
theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_8BYTE, REG_V6, REG_V14, REG_V22, REG_V30);
theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_8BYTE, REG_V7, REG_V15, REG_V23, REG_V31);
#endif
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
BasicBlock* label = genCreateTempLabel();
genDefineTempLabel(label);
instGen(INS_nop);
instGen(INS_nop);
instGen(INS_nop);
instGen(INS_nop);
theEmitter->emitIns_R_L(INS_adr, EA_4BYTE_DSP_RELOC, label, REG_R0);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
printf("*************** End of genArm64EmitterUnitTests()\n");
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
}
#endif // defined(DEBUG)
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
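// For example (illustrative), a delta of 16 makes this function emit "add fp, sp, #16", while a
// delta of zero emits "mov fp, sp" instead.
//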
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
if (delta == 0)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, /* canSkip */ false);
}
else
{
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
}
if (reportUnwindData)
{
compiler->unwindSetFrameReg(REG_FPBASE, delta);
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack.
//
// Notes:
// This only does the probing; allocating the frame is done when callee-saved registers are saved.
// This is done before anything has been pushed. The previous frame might have a large outgoing argument
// space that has been allocated, but the lowest addresses have not been touched. Our frame setup might
// not touch up to the first 504 bytes. This means we could miss a guard page. On Windows, however,
// there are always three guard pages, so we will not miss them all. On Linux, there is only one guard
// page by default, so we need to be more careful. We do an extra probe if we might not have probed
// recently enough. That is, if a call and prolog establishment might lead to missing a page. We do this
// on Windows as well just to be consistent, even though it should not be necessary.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value. Otherwise, it is unchanged.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
// What offset from the final SP was the last probe? If we haven't probed almost a complete page, and
// if the next action on the stack might subtract from SP first, before touching the current SP, then
// we do one more probe at the very bottom. This can happen if we call a function on arm64 that does
// a "STP fp, lr, [sp-504]!", that is, pre-decrement SP then store. Note that we probe here for arm64,
// but we don't alter SP.
target_size_t lastTouchDelta = 0;
assert(!compiler->info.compPublishStubParam || (REG_SECRET_STUB_PARAM != initReg));
if (frameSize < pageSize)
{
lastTouchDelta = frameSize;
}
else if (frameSize < 3 * pageSize)
{
// The probing loop in "else"-case below would require at least 6 instructions (and more if
// 'frameSize' or 'pageSize' can not be encoded with mov-instruction immediate).
// Hence for frames that are smaller than 3 * PAGE_SIZE the JIT inlines the following probing code
// to decrease code size.
// TODO-ARM64: The probing mechanisms should be replaced by a call to stack probe helper
// as it is done on other platforms.
lastTouchDelta = frameSize;
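// For example (illustrative, assuming pageSize == 0x1000 and frameSize == 0x1800): the loop below
// emits a single probe at [sp - 0x1000] and leaves lastTouchDelta == 0x800.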
for (target_size_t probeOffset = pageSize; probeOffset <= frameSize; probeOffset += pageSize)
{
// Generate:
// movw initReg, -probeOffset
// ldr wzr, [sp + initReg]
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -(ssize_t)probeOffset);
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, initReg);
regSet.verifyRegUsed(initReg);
*pInitRegZeroed = false; // The initReg does not contain zero
lastTouchDelta -= pageSize;
}
assert(lastTouchDelta == frameSize % pageSize);
compiler->unwindPadding();
}
else
{
// Emit the following sequence to 'tickle' the pages. Note it is important that stack pointer not change
// until this is complete since the tickles could cause a stack overflow, and we need to be able to crawl
// the stack afterward (which means the stack pointer needs to be known).
regMaskTP availMask = RBM_ALLINT & (regSet.rsGetModifiedRegsMask() | ~RBM_INT_CALLEE_SAVED);
availMask &= ~maskArgRegsLiveIn; // Remove all of the incoming argument registers as they are currently live
availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg
regNumber rOffset = initReg;
regNumber rLimit;
regMaskTP tempMask;
// We pick the next lowest register number for rLimit
noway_assert(availMask != RBM_NONE);
tempMask = genFindLowestBit(availMask);
rLimit = genRegNumFromMask(tempMask);
// Generate:
//
// mov rOffset, -pageSize // On arm, this turns out to be "movw r1, 0xf000; sxth r1, r1".
// // We could save 4 bytes in the prolog by using "movs r1, 0" at the
// // runtime expense of running a useless first loop iteration.
// mov rLimit, -frameSize
// loop:
// ldr wzr, [sp + rOffset]
// sub rOffset, pageSize
// cmp rLimit, rOffset
// b.ls loop // If rLimit is lower or same, we need to probe this rOffset. Note
// // especially that if it is the same, we haven't probed this page.
noway_assert((ssize_t)(int)frameSize == (ssize_t)frameSize); // make sure framesize safely fits within an int
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rOffset, -(ssize_t)pageSize);
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rLimit, -(ssize_t)frameSize);
// There's a "virtual" label here. But we can't create a label in the prolog, so we use the magic
// `emitIns_J` with a negative `instrCount` to branch back a specific number of instructions.
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, rOffset);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, rOffset, rOffset, pageSize);
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, rLimit, rOffset); // If equal, we need to probe again
GetEmitter()->emitIns_J(INS_bls, NULL, -4);
*pInitRegZeroed = false; // The initReg does not contain zero
compiler->unwindPadding();
lastTouchDelta = frameSize % pageSize;
}
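// Example of why the final probe below may be needed (illustrative, assuming a 0x1000-byte page and a
// STACK_PROBE_BOUNDARY_THRESHOLD_BYTES of 512): if lastTouchDelta is 0xF00, a subsequent
// "stp fp, lr, [sp, #-504]!" could write below the last probed page, so we touch the final SP as well.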
if (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize)
{
assert(lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES < 2 * pageSize);
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -(ssize_t)frameSize);
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, initReg);
compiler->unwindPadding();
regSet.verifyRegUsed(initReg);
*pInitRegZeroed = false; // The initReg does not contain zero
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (Full or Load-Only).
//
// Notes:
// All MemoryBarrier instructions can be removed by setting DOTNET_JitNoMemoryBarriers=1
//
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// Avoid emitting redundant memory barriers on arm64 if they belong to the same IG
// and there were no memory accesses in-between them
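// For example (illustrative), a "dmb ishld" immediately followed by a request for a full barrier is
// handled below by upgrading the already-emitted instruction to "dmb ish" rather than emitting a
// second barrier.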
emitter::instrDesc* lastMemBarrier = GetEmitter()->emitLastMemBarrier;
if ((lastMemBarrier != nullptr) && compiler->opts.OptimizationEnabled())
{
BarrierKind prevBarrierKind = BARRIER_FULL;
if (lastMemBarrier->idSmallCns() == INS_BARRIER_ISHLD)
{
prevBarrierKind = BARRIER_LOAD_ONLY;
}
else
{
// Currently we only emit two kinds of barriers on arm64:
// ISH - Full (inner shareable domain)
// ISHLD - LoadOnly (inner shareable domain)
assert(lastMemBarrier->idSmallCns() == INS_BARRIER_ISH);
}
if ((prevBarrierKind == BARRIER_LOAD_ONLY) && (barrierKind == BARRIER_FULL))
{
// Previous memory barrier: load-only, current: full
// Upgrade the previous one to full
assert((prevBarrierKind == BARRIER_LOAD_ONLY) && (barrierKind == BARRIER_FULL));
lastMemBarrier->idSmallCns(INS_BARRIER_ISH);
}
}
else
{
GetEmitter()->emitIns_BARR(INS_dmb, barrierKind == BARRIER_LOAD_ONLY ? INS_BARRIER_ISHLD : INS_BARRIER_ISH);
}
}
//-----------------------------------------------------------------------------------
// genCodeForMadd: Emit a madd (Multiply-Add) instruction
//
// Arguments:
// tree - GT_MADD tree where op1 or op2 is GT_MUL
//
void CodeGen::genCodeForMadd(GenTreeOp* tree)
{
assert(tree->OperIs(GT_MADD) && varTypeIsIntegral(tree) && !(tree->gtFlags & GTF_SET_FLAGS));
genConsumeOperands(tree);
GenTree* a;
GenTree* b;
GenTree* c;
if (tree->gtGetOp1()->OperIs(GT_MUL) && tree->gtGetOp1()->isContained())
{
a = tree->gtGetOp1()->gtGetOp1();
b = tree->gtGetOp1()->gtGetOp2();
c = tree->gtGetOp2();
}
else
{
assert(tree->gtGetOp2()->OperIs(GT_MUL) && tree->gtGetOp2()->isContained());
a = tree->gtGetOp2()->gtGetOp1();
b = tree->gtGetOp2()->gtGetOp2();
c = tree->gtGetOp1();
}
bool useMsub = false;
if (a->OperIs(GT_NEG) && a->isContained())
{
a = a->gtGetOp1();
useMsub = true;
}
if (b->OperIs(GT_NEG) && b->isContained())
{
b = b->gtGetOp1();
useMsub = !useMsub; // it's either "a * -b" or "-a * -b" which is the same as "a * b"
}
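// On arm64, "madd d, a, b, c" computes d = c + a * b and "msub d, a, b, c" computes d = c - a * b,
// so the contained GT_NEG operands folded above simply flip between the two encodings.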
GetEmitter()->emitIns_R_R_R_R(useMsub ? INS_msub : INS_madd, emitActualTypeSize(tree), tree->GetRegNum(),
a->GetRegNum(), b->GetRegNum(), c->GetRegNum());
genProduceReg(tree);
}
//-----------------------------------------------------------------------------------
// genCodeForMsub: Emit a msub (Multiply-Subtract) instruction
//
// Arguments:
// tree - GT_MSUB tree where op2 is GT_MUL
//
void CodeGen::genCodeForMsub(GenTreeOp* tree)
{
assert(tree->OperIs(GT_MSUB) && varTypeIsIntegral(tree) && !(tree->gtFlags & GTF_SET_FLAGS));
genConsumeOperands(tree);
assert(tree->gtGetOp2()->OperIs(GT_MUL));
assert(tree->gtGetOp2()->isContained());
GenTree* a = tree->gtGetOp1();
GenTree* b = tree->gtGetOp2()->gtGetOp1();
GenTree* c = tree->gtGetOp2()->gtGetOp2();
// d = a - b * c
// MSUB d, b, c, a
GetEmitter()->emitIns_R_R_R_R(INS_msub, emitActualTypeSize(tree), tree->GetRegNum(), b->GetRegNum(), c->GetRegNum(),
a->GetRegNum());
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBfiz: Generates the code sequence for a GenTree node that
// represents a bitfield insert in zero with sign/zero extension.
//
// Arguments:
// tree - the bitfield insert in zero node.
//
void CodeGen::genCodeForBfiz(GenTreeOp* tree)
{
assert(tree->OperIs(GT_BFIZ));
emitAttr size = emitActualTypeSize(tree);
unsigned shiftBy = (unsigned)tree->gtGetOp2()->AsIntCon()->IconValue();
unsigned shiftByImm = shiftBy & (emitter::getBitWidth(size) - 1);
GenTreeCast* cast = tree->gtGetOp1()->AsCast();
GenTree* castOp = cast->CastOp();
genConsumeRegs(castOp);
unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE
: genTypeSize(castOp) * BITS_PER_BYTE;
const bool isUnsigned = cast->IsUnsigned() || varTypeIsUnsigned(cast->CastToType());
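// "sbfiz/ubfiz d, s, #lsb, #width" takes the low #width bits of s, sign/zero-extends them, and shifts
// the result left by #lsb; e.g. (illustrative) a contained cast-to-short shifted left by 2 becomes
// "sbfiz x0, x1, #2, #16".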
GetEmitter()->emitIns_R_R_I_I(isUnsigned ? INS_ubfiz : INS_sbfiz, size, tree->GetRegNum(), castOp->GetRegNum(),
(int)shiftByImm, (int)srcBits);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForAddEx: Generates the code sequence for a GenTree node that
// represents an addition with a sign- or zero-extended operand
//
// Arguments:
// tree - the add with extend node.
//
void CodeGen::genCodeForAddEx(GenTreeOp* tree)
{
assert(tree->OperIs(GT_ADDEX));
genConsumeOperands(tree);
GenTree* op;
GenTree* containedOp;
if (tree->gtGetOp1()->isContained())
{
containedOp = tree->gtGetOp1();
op = tree->gtGetOp2();
}
else
{
containedOp = tree->gtGetOp2();
op = tree->gtGetOp1();
}
assert(containedOp->isContained() && !op->isContained());
regNumber dstReg = tree->GetRegNum();
regNumber op1Reg = op->GetRegNum();
regNumber op2Reg = containedOp->gtGetOp1()->GetRegNum();
if (containedOp->OperIs(GT_CAST))
{
GenTreeCast* cast = containedOp->AsCast();
assert(varTypeIsLong(cast->CastToType()));
insOpts opts = cast->IsUnsigned() ? INS_OPTS_UXTW : INS_OPTS_SXTW;
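// e.g. (illustrative) "add x0, x1, w2, sxtw" adds x1 to the sign-extended low 32 bits of x2.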
GetEmitter()->emitIns_R_R_R(tree->gtSetFlags() ? INS_adds : INS_add, emitActualTypeSize(tree), dstReg, op1Reg,
op2Reg, opts);
}
else
{
assert(containedOp->OperIs(GT_LSH));
ssize_t cns = containedOp->gtGetOp2()->AsIntCon()->IconValue();
GetEmitter()->emitIns_R_R_R_I(tree->gtSetFlags() ? INS_adds : INS_add, emitActualTypeSize(tree), dstReg, op1Reg,
op2Reg, cns, INS_OPTS_LSL);
}
genProduceReg(tree);
}
#endif // TARGET_ARM64
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Arm64 Code Generator XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARM64
#include "emit.h"
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "gcinfoencoder.h"
#include "patchpointinfo.h"
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
void CodeGen::genPopCalleeSavedRegistersAndFreeLclFrame(bool jmpEpilog)
{
assert(compiler->compGeneratingEpilog);
regMaskTP rsRestoreRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
if (isFramePointerUsed())
{
rsRestoreRegs |= RBM_FPBASE;
}
rsRestoreRegs |= RBM_LR; // We must save/restore the return address (in the LR register)
regMaskTP regsToRestoreMask = rsRestoreRegs;
const int totalFrameSize = genTotalFrameSize();
// Fetch info about the frame we saved when creating the prolog.
//
const int frameType = compiler->compFrameInfo.frameType;
const int calleeSaveSpOffset = compiler->compFrameInfo.calleeSaveSpOffset;
const int calleeSaveSpDelta = compiler->compFrameInfo.calleeSaveSpDelta;
const int offsetSpToSavedFp = compiler->compFrameInfo.offsetSpToSavedFp;
switch (frameType)
{
case 1:
{
JITDUMP("Frame type 1. #outsz=0; #framesz=%d; localloc? %s\n", totalFrameSize,
dspBool(compiler->compLocallocUsed));
if (compiler->compLocallocUsed)
{
// Restore sp from fp
// mov sp, fp
inst_Mov(TYP_I_IMPL, REG_SPBASE, REG_FPBASE, /* canSkip */ false);
compiler->unwindSetFrameReg(REG_FPBASE, 0);
}
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and post-index SP.
break;
}
case 2:
{
JITDUMP("Frame type 2 (save FP/LR at bottom). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
if (compiler->compLocallocUsed)
{
// Restore sp from fp
// sub sp, fp, #outsz // Uses #outsz if FP/LR stored at bottom
int SPtoFPdelta = genSPtoFPdelta();
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and post-index SP.
break;
}
case 3:
{
JITDUMP("Frame type 3 (save FP/LR at bottom). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
JITDUMP(" calleeSaveSpDelta=%d\n", calleeSaveSpDelta);
regsToRestoreMask &= ~(RBM_FP | RBM_LR); // We'll restore FP/LR at the end, and (hopefully) post-index SP.
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
if (compiler->lvaOutgoingArgSpaceSize > 504)
{
// We can't do "ldp fp,lr,[sp,#outsz]" because #outsz is too big.
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
int spAdjustment2 = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == REGSIZE_BYTES));
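// For example (illustrative): remainingFrameSz == 0x218 with #outsz == 0x210 gives
// spAdjustment2Unaligned == 8, spAdjustment2 == 16 and alignmentAdjustment2 == 8.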
// Restore sp from fp. No need to update sp after this since we've set up fp before adjusting sp
// in prolog.
// sub sp, fp, #alignmentAdjustment2
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, alignmentAdjustment2);
compiler->unwindSetFrameReg(REG_FPBASE, alignmentAdjustment2);
// Generate:
// ldp fp,lr,[sp]
// add sp,sp,#remainingFrameSz
JITDUMP(" alignmentAdjustment2=%d\n", alignmentAdjustment2);
genEpilogRestoreRegPair(REG_FP, REG_LR, alignmentAdjustment2, spAdjustment2, false, REG_IP1, nullptr);
}
else
{
if (compiler->compLocallocUsed)
{
// Restore sp from fp; here that's #outsz from SP
// sub sp, fp, #outsz
int SPtoFPdelta = genSPtoFPdelta();
assert(SPtoFPdelta == (int)compiler->lvaOutgoingArgSpaceSize);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
// Generate:
// ldp fp,lr,[sp,#outsz]
// add sp,sp,#remainingFrameSz ; might need to load this constant in a scratch register if
// ; it's large
JITDUMP(" remainingFrameSz=%d\n", remainingFrameSz);
genEpilogRestoreRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, remainingFrameSz, false,
REG_IP1, nullptr);
}
// Unlike frameType=1 or frameType=2 that restore SP at the end,
// frameType=3 already adjusted SP above to delete local frame.
// There is at most one alignment slot between SP and where we store the callee-saved registers.
assert((calleeSaveSpOffset == 0) || (calleeSaveSpOffset == REGSIZE_BYTES));
break;
}
case 4:
{
JITDUMP("Frame type 4 (save FP/LR at top). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert(genSaveFpLrWithAllCalleeSavedRegisters);
if (compiler->compLocallocUsed)
{
// Restore sp from fp
// sub sp, fp, #outsz // Uses #outsz if FP/LR stored at bottom
int SPtoFPdelta = genSPtoFPdelta();
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, SPtoFPdelta);
compiler->unwindSetFrameReg(REG_FPBASE, SPtoFPdelta);
}
break;
}
case 5:
{
JITDUMP("Frame type 5 (save FP/LR at top). #outsz=%d; #framesz=%d; localloc? %s\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, dspBool(compiler->compLocallocUsed));
assert((calleeSaveSpOffset == 0) || (calleeSaveSpOffset == REGSIZE_BYTES));
// Restore sp from fp:
// sub sp, fp, #sp-to-fp-delta
// This is the same whether there is localloc or not. Note that we don't need to do anything to remove the
// "remainingFrameSz" to reverse the SUB of that amount in the prolog.
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_FPBASE, offsetSpToSavedFp);
compiler->unwindSetFrameReg(REG_FPBASE, offsetSpToSavedFp);
break;
}
default:
unreached();
}
JITDUMP(" calleeSaveSpOffset=%d, calleeSaveSpDelta=%d\n", calleeSaveSpOffset, calleeSaveSpDelta);
genRestoreCalleeSavedRegistersHelp(regsToRestoreMask, calleeSaveSpOffset, calleeSaveSpDelta);
switch (frameType)
{
case 1:
{
// Generate:
// ldp fp,lr,[sp],#framesz
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, totalFrameSize,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
break;
}
case 2:
{
// Generate:
// ldp fp,lr,[sp,#outsz]
// add sp,sp,#framesz
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
break;
}
case 3:
case 4:
case 5:
{
// Nothing to do after restoring callee-saved registers.
break;
}
default:
{
unreached();
}
}
// For OSR, we must also adjust the SP to remove the Tier0 frame.
//
if (compiler->opts.IsOSR())
{
PatchpointInfo* const patchpointInfo = compiler->info.compPatchpointInfo;
const int tier0FrameSize = patchpointInfo->TotalFrameSize();
JITDUMP("Extra SP adjust for OSR to pop off Tier0 frame: %d bytes\n", tier0FrameSize);
// Tier0 size may exceed simple immediate. We're in the epilog so not clear if we can
// use a scratch reg. So just do two adds if necessary.
//
int spAdjust = tier0FrameSize;
if (!GetEmitter()->emitIns_valid_imm_for_add(tier0FrameSize, EA_PTRSIZE))
{
const int lowPart = spAdjust & 0xFFF;
const int highPart = spAdjust - lowPart;
assert(GetEmitter()->emitIns_valid_imm_for_add(highPart, EA_PTRSIZE));
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, highPart);
compiler->unwindAllocStack(highPart);
spAdjust = lowPart;
}
assert(GetEmitter()->emitIns_valid_imm_for_add(spAdjust, EA_PTRSIZE));
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, spAdjust);
compiler->unwindAllocStack(spAdjust);
}
}
//------------------------------------------------------------------------
// genInstrWithConstant: we will typically generate one instruction
//
// ins reg1, reg2, imm
//
// However the imm might not fit as a directly encodable immediate,
// when it doesn't fit we generate extra instruction(s) that sets up
// the 'regTmp' with the proper immediate value.
//
// mov regTmp, imm
// ins reg1, reg2, regTmp
//
// Arguments:
// ins - instruction
// attr - operation size and GC attribute
// reg1, reg2 - first and second register operands
// imm - immediate value (third operand when it fits)
// tmpReg - temp register to use when the 'imm' doesn't fit. Can be REG_NA
// if caller knows for certain the constant will fit.
// inUnwindRegion - true if we are in a prolog/epilog region with unwind codes.
// Default: false.
//
// Return Value:
// returns true if the immediate was small enough to be encoded inside instruction. If not,
// returns false meaning the immediate was too large and tmpReg was used and modified.
//
bool CodeGen::genInstrWithConstant(instruction ins,
emitAttr attr,
regNumber reg1,
regNumber reg2,
ssize_t imm,
regNumber tmpReg,
bool inUnwindRegion /* = false */)
{
bool immFitsInIns = false;
emitAttr size = EA_SIZE(attr);
// reg1 is usually a dest register
// reg2 is always source register
assert(tmpReg != reg2); // regTmp can not match any source register
switch (ins)
{
case INS_add:
case INS_sub:
if (imm < 0)
{
imm = -imm;
ins = (ins == INS_add) ? INS_sub : INS_add;
}
immFitsInIns = emitter::emitIns_valid_imm_for_add(imm, size);
break;
case INS_strb:
case INS_strh:
case INS_str:
// reg1 is a source register for store instructions
assert(tmpReg != reg1); // regTmp can not match any source register
immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
break;
case INS_ldrsb:
case INS_ldrsh:
case INS_ldrsw:
case INS_ldrb:
case INS_ldrh:
case INS_ldr:
immFitsInIns = emitter::emitIns_valid_imm_for_ldst_offset(imm, size);
break;
default:
assert(!"Unexpected instruction in genInstrWithConstant");
break;
}
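// For example (illustrative), "add sp, sp, #0x12340" cannot be encoded as a 12-bit (optionally
// shifted) immediate, so we take the path below that loads 0x12340 into tmpReg first.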
if (immFitsInIns)
{
// generate a single instruction that encodes the immediate directly
GetEmitter()->emitIns_R_R_I(ins, attr, reg1, reg2, imm);
}
else
{
// caller can specify REG_NA for tmpReg, when it "knows" that the immediate will always fit
assert(tmpReg != REG_NA);
// generate two or more instructions
// first we load the immediate into tmpReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, imm);
regSet.verifyRegUsed(tmpReg);
// when we are in an unwind code region
// we record the extra instructions using unwindPadding()
if (inUnwindRegion)
{
compiler->unwindPadding();
}
// generate the instruction using a three register encoding with the immediate in tmpReg
GetEmitter()->emitIns_R_R_R(ins, attr, reg1, reg2, tmpReg);
}
return immFitsInIns;
}
//------------------------------------------------------------------------
// genStackPointerAdjustment: add a specified constant value to the stack pointer in either the prolog
// or the epilog. The unwind codes for the generated instructions are produced. An available temporary
// register is required to be specified, in case the constant is too large to encode in an "add"
// instruction (or "sub" instruction if we choose to use one), such that we need to load the constant
// into a register first, before using it.
//
// Arguments:
// spDelta - the value to add to SP (can be negative)
// tmpReg - an available temporary register
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
// reportUnwindData - If true, report the change in unwind data. Otherwise, do not report it.
//
// Return Value:
// None.
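// For example (illustrative), an spDelta of -0x12340 cannot be encoded directly, so
// genInstrWithConstant loads the constant into tmpReg and emits "sub sp, sp, tmpReg" instead.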
void CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg, bool* pTmpRegIsZero, bool reportUnwindData)
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
//
bool wasTempRegisterUsedForImm =
!genInstrWithConstant(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, spDelta, tmpReg, true);
if (wasTempRegisterUsedForImm)
{
if (pTmpRegIsZero != nullptr)
{
*pTmpRegIsZero = false;
}
}
if (reportUnwindData)
{
// spDelta is negative in the prolog, positive in the epilog, but we always tell the unwind codes the positive
// value.
ssize_t spDeltaAbs = abs(spDelta);
unsigned unwindSpDelta = (unsigned)spDeltaAbs;
assert((ssize_t)unwindSpDelta == spDeltaAbs); // make sure that it fits in an unsigned
compiler->unwindAllocStack(unwindSpDelta);
}
}
//------------------------------------------------------------------------
// genPrologSaveRegPair: Save a pair of general-purpose or floating-point/SIMD registers in a function or funclet
// prolog. If possible, we use pre-indexed addressing to adjust SP and store the registers with a single instruction.
// The caller must ensure that we can use the STP instruction, and that spOffset will be in the legal range for that
// instruction.
//
// Arguments:
// reg1 - First register of pair to save.
// reg2 - Second register of pair to save.
// spOffset - The offset from SP to store reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP before the register saves (must be negative or
// zero).
// useSaveNextPair - True if the last prolog instruction was to save the previous register pair. This
// allows us to emit the "save_next" unwind code.
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
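// For example (illustrative), saving the pair (x19, x20) with spOffset == 0 and spDelta == -96 emits
// "stp x19, x20, [sp, #-96]!", folding the SP adjustment into the store.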
void CodeGen::genPrologSaveRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta <= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
assert(genIsValidFloatReg(reg1) == genIsValidFloatReg(reg2)); // registers must be both general-purpose, or both
// FP/SIMD
bool needToSaveRegs = true;
if (spDelta != 0)
{
assert(!useSaveNextPair);
if ((spOffset == 0) && (spDelta >= -512))
{
// We can use pre-indexed addressing.
// stp REG, REG + 1, [SP, #spDelta]!
// 64-bit STP offset range: -512 to 504, multiple of 8.
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(reg1, reg2, spDelta);
needToSaveRegs = false;
}
else // (spOffset != 0) || (spDelta < -512)
{
// We need to do SP adjustment separately from the store; we can't fold in a pre-indexed addressing and the
// non-zero offset.
// generate sub SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
if (needToSaveRegs)
{
// stp REG, REG + 1, [SP, #offset]
// 64-bit STP offset range: -512 to 504, multiple of 8.
assert(spOffset <= 504);
assert((spOffset % 8) == 0);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
if (TargetOS::IsUnix && compiler->generateCFIUnwindCodes())
{
useSaveNextPair = false;
}
if (useSaveNextPair)
{
// This works as long as we've only been saving pairs, in order, and we've saved the previous one just
// before this one.
compiler->unwindSaveNext();
}
else
{
compiler->unwindSaveRegPair(reg1, reg2, spOffset);
}
}
}
//------------------------------------------------------------------------
// genPrologSaveReg: Like genPrologSaveRegPair, but for a single register. Save a single general-purpose or
// floating-point/SIMD register in a function or funclet prolog. Note that if we wish to change SP (i.e., spDelta != 0),
// then spOffset must be 8. This is because otherwise we would create an alignment hole above the saved register, not
// below it, which we currently don't support. This restriction could be loosened if the callers change to handle it
// (and this function changes to support using pre-indexed STR addressing). The caller must ensure that we can use the
// STR instruction, and that spOffset will be in the legal range for that instruction.
//
// Arguments:
// reg1 - Register to save.
// spOffset - The offset from SP to store reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP before the register saves (must be negative or
// zero).
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
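// For example (illustrative), saving x19 with spOffset == 0 and spDelta == -16 emits
// "str x19, [sp, #-16]!", folding the SP adjustment into the store.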
void CodeGen::genPrologSaveReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta <= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
bool needToSaveRegs = true;
if (spDelta != 0)
{
if ((spOffset == 0) && (spDelta >= -256))
{
// We can use pre-index addressing.
// str REG, [SP, #spDelta]!
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPreindexed(reg1, spDelta);
needToSaveRegs = false;
}
else // (spOffset != 0) || (spDelta < -256)
{
// generate sub SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
if (needToSaveRegs)
{
// str REG, [SP, #offset]
// 64-bit STR offset range: 0 to 32760, multiple of 8.
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
}
}
//------------------------------------------------------------------------
// genEpilogRestoreRegPair: This is the opposite of genPrologSaveRegPair(), run in the epilog instead of the prolog.
// The stack pointer adjustment, if requested, is done after the register restore, using post-index addressing.
// The caller must ensure that we can use the LDP instruction, and that spOffset will be in the legal range for that
// instruction.
//
// Arguments:
// reg1 - First register of pair to restore.
// reg2 - Second register of pair to restore.
// spOffset - The offset from SP to load reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP after the register restores (must be positive or
// zero).
// useSaveNextPair - True if the last prolog instruction was to save the previous register pair. This
// allows us to emit the "save_next" unwind code.
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
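// For example (illustrative), restoring the pair (fp, lr) with spOffset == 0 and spDelta == 96 emits
// "ldp fp, lr, [sp], #96", folding the SP adjustment into the load.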
void CodeGen::genEpilogRestoreRegPair(regNumber reg1,
regNumber reg2,
int spOffset,
int spDelta,
bool useSaveNextPair,
regNumber tmpReg,
bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta >= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
assert(genIsValidFloatReg(reg1) == genIsValidFloatReg(reg2)); // registers must be both general-purpose, or both
// FP/SIMD
if (spDelta != 0)
{
assert(!useSaveNextPair);
if ((spOffset == 0) && (spDelta <= 504))
{
// Fold the SP change into this instruction.
// ldp reg1, reg2, [SP], #spDelta
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(reg1, reg2, -spDelta);
}
else // (spOffset != 0) || (spDelta > 504)
{
// Can't fold in the SP change; need to use a separate ADD instruction.
// ldp reg1, reg2, [SP, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
compiler->unwindSaveRegPair(reg1, reg2, spOffset);
// generate add SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, spOffset);
if (TargetOS::IsUnix && compiler->generateCFIUnwindCodes())
{
useSaveNextPair = false;
}
if (useSaveNextPair)
{
compiler->unwindSaveNext();
}
else
{
compiler->unwindSaveRegPair(reg1, reg2, spOffset);
}
}
}
//------------------------------------------------------------------------
// genEpilogRestoreReg: The opposite of genPrologSaveReg(), run in the epilog instead of the prolog.
//
// Arguments:
// reg1 - Register to restore.
// spOffset - The offset from SP to restore reg1 (must be positive or zero).
// spDelta - If non-zero, the amount to add to SP after the register restores (must be positive or
// zero).
// tmpReg - An available temporary register. Needed for the case of large frames.
// pTmpRegIsZero - If we use tmpReg, and pTmpRegIsZero is non-null, we set *pTmpRegIsZero to 'false'.
// Otherwise, we don't touch it.
//
// Return Value:
// None.
void CodeGen::genEpilogRestoreReg(regNumber reg1, int spOffset, int spDelta, regNumber tmpReg, bool* pTmpRegIsZero)
{
assert(spOffset >= 0);
assert(spDelta >= 0);
assert((spDelta % 16) == 0); // SP changes must be 16-byte aligned
if (spDelta != 0)
{
if ((spOffset == 0) && (spDelta <= 255))
{
// We can use post-index addressing.
// ldr REG, [SP], #spDelta
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spDelta, INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPreindexed(reg1, -spDelta);
}
else // (spOffset != 0) || (spDelta > 255)
{
// ldr reg1, [SP, #offset]
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
// generate add SP,SP,imm
genStackPointerAdjustment(spDelta, tmpReg, pTmpRegIsZero, /* reportUnwindData */ true);
}
}
else
{
// ldr reg1, [SP, #offset]
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, reg1, REG_SPBASE, spOffset);
compiler->unwindSaveReg(reg1, spOffset);
}
}
//------------------------------------------------------------------------
// genBuildRegPairsStack: Build a stack of register pairs for prolog/epilog save/restore for the given mask.
// The first register pair will contain the lowest register; subsequent pairs combine neighboring
// registers. If a register can't be paired (for example, there is a hole, or it is the last register in a
// mask with an odd number of registers), the second element of that RegPair will be REG_NA.
//
// Arguments:
// regsMask - a mask of registers for prolog/epilog generation;
//    regStack - a regStack instance to build the stack in; passed in to avoid temporary copies.
//
// Return value:
// no return value; the regStack argument is modified.
//
// static
void CodeGen::genBuildRegPairsStack(regMaskTP regsMask, ArrayStack<RegPair>* regStack)
{
assert(regStack != nullptr);
assert(regStack->Height() == 0);
unsigned regsCount = genCountBits(regsMask);
while (regsMask != RBM_NONE)
{
regMaskTP reg1Mask = genFindLowestBit(regsMask);
regNumber reg1 = genRegNumFromMask(reg1Mask);
regsMask &= ~reg1Mask;
regsCount -= 1;
bool isPairSave = false;
if (regsCount > 0)
{
regMaskTP reg2Mask = genFindLowestBit(regsMask);
regNumber reg2 = genRegNumFromMask(reg2Mask);
if (reg2 == REG_NEXT(reg1))
{
// The JIT doesn't allow saving pair (R28,FP), even though the
// save_regp register pair unwind code specification allows it.
// The JIT always saves (FP,LR) as a pair, and uses the save_fplr
// unwind code. This only comes up in stress mode scenarios
// where callee-saved registers are not allocated completely
// from lowest-to-highest, without gaps.
if (reg1 != REG_R28)
{
// Both registers must have the same type to be saved as pair.
if (genIsValidFloatReg(reg1) == genIsValidFloatReg(reg2))
{
isPairSave = true;
regsMask &= ~reg2Mask;
regsCount -= 1;
regStack->Push(RegPair(reg1, reg2));
}
}
}
}
if (!isPairSave)
{
regStack->Push(RegPair(reg1));
}
}
assert(regsCount == 0 && regsMask == RBM_NONE);
genSetUseSaveNextPairs(regStack);
}
//------------------------------------------------------------------------
// genSetUseSaveNextPairs: Set useSaveNextPair for each RegPair on the stack whose unwind info can be encoded
// as a save_next code.
//
// Arguments:
// regStack - a regStack instance to set useSaveNextPair.
//
// Notes:
//  We can use save_next for RegPair(N, N+1) only when we have a sequence like (N-2, N-1), (N, N+1).
//  In this case, in the prolog, save_next for (N, N+1) refers to save_pair(N-2, N-1);
//  in the epilog the unwinder will search for the first save_pair (N-2, N-1)
//  and then go back to the first save_next (N, N+1) to restore it first.
//
// static
void CodeGen::genSetUseSaveNextPairs(ArrayStack<RegPair>* regStack)
{
for (int i = 1; i < regStack->Height(); ++i)
{
RegPair& curr = regStack->BottomRef(i);
RegPair prev = regStack->Bottom(i - 1);
if (prev.reg2 == REG_NA || curr.reg2 == REG_NA)
{
continue;
}
if (REG_NEXT(prev.reg2) != curr.reg1)
{
continue;
}
if (genIsValidFloatReg(prev.reg2) != genIsValidFloatReg(curr.reg1))
{
            // It is possible to support chaining the last integer pair with the first float pair,
            // but it is a very rare case and it would require superfluous changes in the unwinder.
continue;
}
curr.useSaveNextPair = true;
}
}
//------------------------------------------------------------------------
// genGetSlotSizeForRegsInMask: Get the stack slot size appropriate for the register type from the mask.
//
// Arguments:
// regsMask - a mask of registers for prolog/epilog generation.
//
// Return value:
// stack slot size in bytes.
//
// Note: Because int and float register type sizes match we can call this function with a mask that includes both.
//
// static
int CodeGen::genGetSlotSizeForRegsInMask(regMaskTP regsMask)
{
assert((regsMask & (RBM_CALLEE_SAVED | RBM_FP | RBM_LR)) == regsMask); // Do not expect anything else.
static_assert_no_msg(REGSIZE_BYTES == FPSAVE_REGSIZE_BYTES);
return REGSIZE_BYTES;
}
//------------------------------------------------------------------------
// genSaveCalleeSavedRegisterGroup: Saves the group of registers described by the mask.
//
// Arguments:
// regsMask - a mask of registers for prolog generation;
// spDelta - if non-zero, the amount to add to SP before the first register save (or together with it);
// spOffset - the offset from SP that is the beginning of the callee-saved register area;
//
void CodeGen::genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset)
{
const int slotSize = genGetSlotSizeForRegsInMask(regsMask);
ArrayStack<RegPair> regStack(compiler->getAllocator(CMK_Codegen));
genBuildRegPairsStack(regsMask, ®Stack);
for (int i = 0; i < regStack.Height(); ++i)
{
RegPair regPair = regStack.Bottom(i);
if (regPair.reg2 != REG_NA)
{
// We can use a STP instruction.
genPrologSaveRegPair(regPair.reg1, regPair.reg2, spOffset, spDelta, regPair.useSaveNextPair, REG_IP0,
nullptr);
spOffset += 2 * slotSize;
}
else
{
// No register pair; we use a STR instruction.
genPrologSaveReg(regPair.reg1, spOffset, spDelta, REG_IP0, nullptr);
spOffset += slotSize;
}
spDelta = 0; // We've now changed SP already, if necessary; don't do it again.
}
}
//------------------------------------------------------------------------
// genSaveCalleeSavedRegistersHelp: Save the callee-saved registers in 'regsToSaveMask' to the stack frame
// in the function or funclet prolog. Registers are saved in register number order from low addresses
// to high addresses. This means that integer registers are saved at lower addresses than floating-point/SIMD
// registers. However, when genSaveFpLrWithAllCalleeSavedRegisters is true, the integer registers are stored
// at higher addresses than floating-point/SIMD registers, that is, the relative order of these two classes
// is reversed. This is done to put the saved frame pointer very high in the frame, for simplicity.
//
// TODO: We could always put integer registers at the higher addresses, if desired, to remove this special
// case. It would cause many asm diffs when first implemented.
//
// If establishing frame pointer chaining, it must be done after saving the callee-saved registers.
//
// We can only use the instructions that are allowed by the unwind codes. The caller ensures that
// there is enough space on the frame to store these registers, and that the store instructions
// we need to use (STR or STP) are encodable with the stack-pointer immediate offsets we need to use.
//
// The caller can tell us to fold in a stack pointer adjustment, which we will do with the first instruction.
// Note that the stack pointer adjustment must be by a multiple of 16 to preserve the invariant that the
// stack pointer is always 16 byte aligned. If we are saving an odd number of callee-saved
// registers, though, we will have an empty alignment slot somewhere. It turns out we will put
// it below (at a lower address) the callee-saved registers, as that is currently how we
// do frame layout. This means that the first stack offset will be 8 and the stack pointer
// adjustment must be done by a SUB, and not folded in to a pre-indexed store.
//
// Arguments:
// regsToSaveMask - The mask of callee-saved registers to save. If empty, this function does nothing.
// lowestCalleeSavedOffset - The offset from SP that is the beginning of the callee-saved register area. Note that
// if non-zero spDelta, then this is the offset of the first save *after* that
// SP adjustment.
// spDelta - If non-zero, the amount to add to SP before the register saves (must be negative or
// zero).
//
// Notes:
//    The save set can contain LR, in which case LR is saved along with the other callee-saved registers.
//    However, the JIT currently doesn't use frames without a frame pointer on arm64.
//
void CodeGen::genSaveCalleeSavedRegistersHelp(regMaskTP regsToSaveMask, int lowestCalleeSavedOffset, int spDelta)
{
assert(spDelta <= 0);
assert(-spDelta <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
unsigned regsToSaveCount = genCountBits(regsToSaveMask);
if (regsToSaveCount == 0)
{
if (spDelta != 0)
{
// Currently this is the case for varargs only
// whose size is MAX_REG_ARG * REGSIZE_BYTES = 64 bytes.
genStackPointerAdjustment(spDelta, REG_NA, nullptr, /* reportUnwindData */ true);
}
return;
}
assert((spDelta % 16) == 0);
// We also can save FP and LR, even though they are not in RBM_CALLEE_SAVED.
assert(regsToSaveCount <= genCountBits(RBM_CALLEE_SAVED | RBM_FP | RBM_LR));
// Save integer registers at higher addresses than floating-point registers.
regMaskTP maskSaveRegsFloat = regsToSaveMask & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = regsToSaveMask & ~maskSaveRegsFloat;
if (maskSaveRegsFloat != RBM_NONE)
{
genSaveCalleeSavedRegisterGroup(maskSaveRegsFloat, spDelta, lowestCalleeSavedOffset);
spDelta = 0;
lowestCalleeSavedOffset += genCountBits(maskSaveRegsFloat) * FPSAVE_REGSIZE_BYTES;
}
if (maskSaveRegsInt != RBM_NONE)
{
genSaveCalleeSavedRegisterGroup(maskSaveRegsInt, spDelta, lowestCalleeSavedOffset);
// No need to update spDelta, lowestCalleeSavedOffset since they're not used after this.
}
}
//------------------------------------------------------------------------
// genRestoreCalleeSavedRegisterGroup: Restores the group of registers described by the mask.
//
// Arguments:
// regsMask - a mask of registers for epilog generation;
// spDelta - if non-zero, the amount to add to SP after the last register restore (or together with it);
// spOffset - the offset from SP that is the beginning of the callee-saved register area;
//
void CodeGen::genRestoreCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, int spOffset)
{
const int slotSize = genGetSlotSizeForRegsInMask(regsMask);
ArrayStack<RegPair> regStack(compiler->getAllocator(CMK_Codegen));
genBuildRegPairsStack(regsMask, ®Stack);
int stackDelta = 0;
for (int i = 0; i < regStack.Height(); ++i)
{
bool lastRestoreInTheGroup = (i == regStack.Height() - 1);
bool updateStackDelta = lastRestoreInTheGroup && (spDelta != 0);
if (updateStackDelta)
{
// Update stack delta only if it is the last restore (the first save).
assert(stackDelta == 0);
stackDelta = spDelta;
}
RegPair regPair = regStack.Top(i);
if (regPair.reg2 != REG_NA)
{
spOffset -= 2 * slotSize;
genEpilogRestoreRegPair(regPair.reg1, regPair.reg2, spOffset, stackDelta, regPair.useSaveNextPair, REG_IP1,
nullptr);
}
else
{
spOffset -= slotSize;
genEpilogRestoreReg(regPair.reg1, spOffset, stackDelta, REG_IP1, nullptr);
}
}
}
//------------------------------------------------------------------------
// genRestoreCalleeSavedRegistersHelp: Restore the callee-saved registers in 'regsToRestoreMask' from the stack frame
// in the function or funclet epilog. This exactly reverses the actions of genSaveCalleeSavedRegistersHelp().
//
// Arguments:
// regsToRestoreMask - The mask of callee-saved registers to restore. If empty, this function does nothing.
// lowestCalleeSavedOffset - The offset from SP that is the beginning of the callee-saved register area.
// spDelta - If non-zero, the amount to add to SP after the register restores (must be positive or
// zero).
//
// Here's an example restore sequence:
// ldp x27, x28, [sp,#96]
// ldp x25, x26, [sp,#80]
// ldp x23, x24, [sp,#64]
// ldp x21, x22, [sp,#48]
// ldp x19, x20, [sp,#32]
//
// For the case of non-zero spDelta, we assume the base of the callee-save registers to restore is at SP, and
// the last restore adjusts SP by the specified amount. For example:
// ldp x27, x28, [sp,#64]
// ldp x25, x26, [sp,#48]
// ldp x23, x24, [sp,#32]
// ldp x21, x22, [sp,#16]
// ldp x19, x20, [sp], #80
//
// Note that you call the unwind functions specifying the prolog operation that is being undone. So, for example,
// when generating a post-indexed load, you call the unwind function specifying the corresponding pre-indexed store.
//
// Return Value:
// None.
void CodeGen::genRestoreCalleeSavedRegistersHelp(regMaskTP regsToRestoreMask, int lowestCalleeSavedOffset, int spDelta)
{
assert(spDelta >= 0);
unsigned regsToRestoreCount = genCountBits(regsToRestoreMask);
if (regsToRestoreCount == 0)
{
if (spDelta != 0)
{
// Currently this is the case for varargs only
// whose size is MAX_REG_ARG * REGSIZE_BYTES = 64 bytes.
genStackPointerAdjustment(spDelta, REG_NA, nullptr, /* reportUnwindData */ true);
}
return;
}
assert((spDelta % 16) == 0);
// We also can restore FP and LR, even though they are not in RBM_CALLEE_SAVED.
assert(regsToRestoreCount <= genCountBits(RBM_CALLEE_SAVED | RBM_FP | RBM_LR));
// Point past the end, to start. We predecrement to find the offset to load from.
static_assert_no_msg(REGSIZE_BYTES == FPSAVE_REGSIZE_BYTES);
int spOffset = lowestCalleeSavedOffset + regsToRestoreCount * REGSIZE_BYTES;
// Save integer registers at higher addresses than floating-point registers.
regMaskTP maskRestoreRegsFloat = regsToRestoreMask & RBM_ALLFLOAT;
regMaskTP maskRestoreRegsInt = regsToRestoreMask & ~maskRestoreRegsFloat;
// Restore in the opposite order of saving.
if (maskRestoreRegsInt != RBM_NONE)
{
int spIntDelta = (maskRestoreRegsFloat != RBM_NONE) ? 0 : spDelta; // should we delay the SP adjustment?
genRestoreCalleeSavedRegisterGroup(maskRestoreRegsInt, spIntDelta, spOffset);
spOffset -= genCountBits(maskRestoreRegsInt) * REGSIZE_BYTES;
}
if (maskRestoreRegsFloat != RBM_NONE)
{
// If there is any spDelta, it must be used here.
genRestoreCalleeSavedRegisterGroup(maskRestoreRegsFloat, spDelta, spOffset);
// No need to update spOffset since it's not used after this.
}
}
// clang-format off
/*****************************************************************************
*
* Generates code for an EH funclet prolog.
*
* Funclets have the following incoming arguments:
*
* catch: x0 = the exception object that was caught (see GT_CATCH_ARG)
* filter: x0 = the exception object to filter (see GT_CATCH_ARG), x1 = CallerSP of the containing function
* finally/fault: none
*
* Funclets set the following registers on exit:
*
* catch: x0 = the address at which execution should resume (see BBJ_EHCATCHRET)
* filter: x0 = non-zero if the handler should handle the exception, zero otherwise (see GT_RETFILT)
* finally/fault: none
*
* The ARM64 funclet prolog sequence is one of the following (Note: #framesz is total funclet frame size,
* including everything; #outsz is outgoing argument space. #framesz must be a multiple of 16):
*
* Frame type 1:
* For #outsz == 0 and #framesz <= 512:
* stp fp,lr,[sp,-#framesz]! ; establish the frame (predecrement by #framesz), save FP/LR
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned.
* |-----------------------|
* | Saved FP, LR | // 16 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Frame type 2:
* For #outsz != 0 and #framesz <= 512:
* sub sp,sp,#framesz ; establish the frame
* stp fp,lr,[sp,#outsz] ; save FP/LR.
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned.
* |-----------------------|
* | Saved FP, LR | // 16 bytes
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Frame type 3:
* For #framesz > 512:
* stp fp,lr,[sp,- (#framesz - #outsz)]! ; establish the frame, save FP/LR
* ; note that it is guaranteed here that (#framesz - #outsz) <= 240
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
* sub sp,sp,#outsz ; create space for outgoing argument space
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the first SP subtraction 16 byte aligned
* |-----------------------|
* | Saved FP, LR | // 16 bytes
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned (specifically, to 16-byte align the outgoing argument space).
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Both #1 and #2 only change SP once. That means that there will be a maximum of one alignment slot needed. For the general case, #3,
* it is possible that we will need to add alignment to both changes to SP, leading to 16 bytes of alignment. Remember that the stack
* pointer needs to be 16 byte aligned at all times. The size of the PSP slot plus callee-saved registers space is a maximum of 240 bytes:
*
* FP,LR registers
* 10 int callee-saved register x19-x28
* 8 float callee-saved registers v8-v15
* 8 saved integer argument registers x0-x7, if varargs function
* 1 PSP slot
* 1 alignment slot
* == 30 slots * 8 bytes = 240 bytes.
*
* The outgoing argument size, however, can be very large, if we call a function that takes a large number of
* arguments (note that we currently use the same outgoing argument space size in the funclet as for the main
* function, even if the funclet doesn't have any calls, or has a much smaller, or larger, maximum number of
* outgoing arguments for any call). In that case, we need to 16-byte align the initial change to SP, before
* saving off the callee-saved registers and establishing the PSPsym, so we can use the limited immediate offset
* encodings we have available, before doing another 16-byte aligned SP adjustment to create the outgoing argument
* space. Both changes to SP might need to add alignment padding.
*
* In addition to the above "standard" frames, we also need to support a frame where the saved FP/LR are at the
* highest addresses. This is to match the frame layout (specifically, callee-saved registers including FP/LR
* and the PSPSym) that is used in the main function when a GS cookie is required due to the use of localloc.
* (Note that localloc cannot be used in a funclet.) In these variants, not only has the position of FP/LR
* changed, but where the alignment padding is placed has also changed.
*
* Frame type 4 (variant of frame types 1 and 2):
* For #framesz <= 512:
* sub sp,sp,#framesz ; establish the frame
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
* stp fp,lr,[sp,#yyy] ; save FP/LR.
* ; write PSPSym
*
* The "#framesz <= 512" condition ensures that after we've established the frame, we can use "stp" with its
* maximum allowed offset (504) to save the callee-saved register at the highest address.
*
* We use "sub" instead of folding it into the next instruction as a predecrement, as we need to write PSPSym
* at the bottom of the stack, and there might also be an alignment padding slot.
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* | Saved LR | // 8 bytes
* |-----------------------|
* | Saved FP | // 8 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned.
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes (optional; if #outsz > 0)
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Frame type 5 (variant of frame type 3):
* For #framesz > 512:
* sub sp,sp,(#framesz - #outsz) ; establish part of the frame. Note that it is guaranteed here that (#framesz - #outsz) <= 240
* stp x19,x20,[sp,#xxx] ; save callee-saved registers, as necessary
* stp fp,lr,[sp,#yyy] ; save FP/LR.
* sub sp,sp,#outsz ; create space for outgoing argument space
* ; write PSPSym
*
* For large frames with "#framesz > 512", we must do one SP adjustment first, after which we can save callee-saved
* registers with up to the maximum "stp" offset of 504. Then, we can establish the rest of the frame (namely, the
* space for the outgoing argument space).
*
* The funclet frame is thus:
*
* | |
* |-----------------------|
* | incoming arguments |
* +=======================+ <---- Caller's SP
* | Varargs regs space | // Only for varargs main functions; 64 bytes
* |-----------------------|
* | Saved LR | // 8 bytes
* |-----------------------|
* | Saved FP | // 8 bytes
* |-----------------------|
* |Callee saved registers | // multiple of 8 bytes
* |-----------------------|
* | PSP slot | // 8 bytes (omitted in CoreRT ABI)
* |-----------------------|
* ~ alignment padding ~ // To make the first SP subtraction 16 byte aligned
* |-----------------------|
* ~ alignment padding ~ // To make the whole frame 16 byte aligned (specifically, to 16-byte align the outgoing argument space).
* |-----------------------|
* | Outgoing arg space | // multiple of 8 bytes
* |-----------------------| <---- Ambient SP
* | | |
* ~ | Stack grows ~
* | | downward |
* V
*
* Note that in this case we might have 16 bytes of alignment that is adjacent. This is because we are doing 2 SP
* subtractions, and each one must be aligned up to 16 bytes.
*
* Note that in all cases, the PSPSym is in exactly the same position with respect to Caller-SP, and that location is the same relative to Caller-SP
* as in the main function.
*
* Funclets do not have varargs arguments. However, because the PSPSym must exist at the same offset from Caller-SP as in the main function, we
* must add buffer space for the saved varargs argument registers here, if the main function did the same.
*
* ; After this header, fill the PSP slot, for use by the VM (it gets reported with the GC info), or by code generation of nested filters.
* ; This is not part of the "OS prolog"; it has no associated unwind data, and is not reversed in the funclet epilog.
*
* if (this is a filter funclet)
* {
* // x1 on entry to a filter funclet is CallerSP of the containing function:
* // either the main function, or the funclet for a handler that this filter is dynamically nested within.
* // Note that a filter can be dynamically nested within a funclet even if it is not statically within
* // a funclet. Consider:
* //
* // try {
* // try {
* // throw new Exception();
* // } catch(Exception) {
* // throw new Exception(); // The exception thrown here ...
* // }
* // } filter { // ... will be processed here, while the "catch" funclet frame is still on the stack
* // } filter-handler {
* // }
* //
* // Because of this, we need a PSP in the main function anytime a filter funclet doesn't know whether the enclosing frame will
* // be a funclet or main function. We won't know any time there is a filter protecting nested EH. To simplify, we just always
* // create a main function PSP for any function with a filter.
*
* ldr x1, [x1, #CallerSP_to_PSP_slot_delta] ; Load the CallerSP of the main function (stored in the PSP of the dynamically containing funclet or function)
* str x1, [sp, #SP_to_PSP_slot_delta] ; store the PSP
* add fp, x1, #Function_CallerSP_to_FP_delta ; re-establish the frame pointer
* }
* else
* {
* // This is NOT a filter funclet. The VM re-establishes the frame pointer on entry.
* // TODO-ARM64-CQ: if VM set x1 to CallerSP on entry, like for filters, we could save an instruction.
*
* add x3, fp, #Function_FP_to_CallerSP_delta ; compute the CallerSP, given the frame pointer. x3 is scratch.
* str x3, [sp, #SP_to_PSP_slot_delta] ; store the PSP
* }
*
* An example epilog sequence is then:
*
* add sp,sp,#outsz ; if any outgoing argument space
* ... ; restore callee-saved registers
* ldp x19,x20,[sp,#xxx]
* ldp fp,lr,[sp],#framesz
* ret lr
*
*/
// clang-format on
void CodeGen::genFuncletProlog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletProlog()\n");
#endif
assert(block != NULL);
assert(block->bbFlags & BBF_FUNCLET_BEG);
ScopedSetVariable<bool> _setGeneratingProlog(&compiler->compGeneratingProlog, true);
gcInfo.gcResetForBB();
compiler->unwindBegProlog();
regMaskTP maskSaveRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = genFuncletInfo.fiSaveRegs & ~maskSaveRegsFloat;
// Funclets must always save LR and FP, since when we have funclets we must have an FP frame.
assert((maskSaveRegsInt & RBM_LR) != 0);
assert((maskSaveRegsInt & RBM_FP) != 0);
bool isFilter = (block->bbCatchTyp == BBCT_FILTER);
regMaskTP maskArgRegsLiveIn;
if (isFilter)
{
maskArgRegsLiveIn = RBM_R0 | RBM_R1;
}
else if ((block->bbCatchTyp == BBCT_FINALLY) || (block->bbCatchTyp == BBCT_FAULT))
{
maskArgRegsLiveIn = RBM_NONE;
}
else
{
maskArgRegsLiveIn = RBM_R0;
}
if (genFuncletInfo.fiFrameType == 1)
{
// With OSR we may see large values for fiSpDelta1
// (we really need to probe the frame, sigh)
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
maskSaveRegsInt &= ~(RBM_LR | RBM_FP); // We've saved these now
assert(genFuncletInfo.fiSpDelta2 == 0);
assert(genFuncletInfo.fiSP_to_FPLR_save_delta == 0);
}
else if (genFuncletInfo.fiFrameType == 2)
{
// fiFrameType==2 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate sub SP,SP,imm
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
maskSaveRegsInt &= ~(RBM_LR | RBM_FP); // We've saved these now
}
else if (genFuncletInfo.fiFrameType == 3)
{
// With OSR we may see large values for fiSpDelta1
// (we really need to probe the frame, sigh)
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, genFuncletInfo.fiSpDelta1,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
maskSaveRegsInt &= ~(RBM_LR | RBM_FP); // We've saved these now
}
else if (genFuncletInfo.fiFrameType == 4)
{
// fiFrameType==4 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate sub SP,SP,imm
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
}
else
{
assert(genFuncletInfo.fiFrameType == 5);
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
// Nothing to do here; the first SP adjustment will be done by saving the callee-saved registers.
}
}
int lowestCalleeSavedOffset = genFuncletInfo.fiSP_to_CalleeSave_delta +
genFuncletInfo.fiSpDelta2; // We haven't done the second adjustment of SP yet (if any)
genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, lowestCalleeSavedOffset, 0);
if ((genFuncletInfo.fiFrameType == 3) || (genFuncletInfo.fiFrameType == 5))
{
// Note that genFuncletInfo.fiSpDelta2 is always a non-positive value
assert(genFuncletInfo.fiSpDelta2 <= 0);
// generate sub SP,SP,imm
if (genFuncletInfo.fiSpDelta2 < 0)
{
genStackPointerAdjustment(genFuncletInfo.fiSpDelta2, REG_R2, nullptr, /* reportUnwindData */ true);
}
else
{
// we will only see fiSpDelta2 == 0 for osr funclets
assert(compiler->opts.IsOSR());
}
}
// This is the end of the OS-reported prolog for purposes of unwinding
compiler->unwindEndProlog();
// If there is no PSPSym (CoreRT ABI), we are done. Otherwise, we need to set up the PSPSym in the funclet frame.
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
if (isFilter)
{
// This is the first block of a filter
// Note that register x1 = CallerSP of the containing function
// X1 is overwritten by the first Load (new callerSP)
// X2 is scratch when we have a large constant offset
// Load the CallerSP of the main function (stored in the PSP of the dynamically containing funclet or
// function)
genInstrWithConstant(INS_ldr, EA_PTRSIZE, REG_R1, REG_R1, genFuncletInfo.fiCallerSP_to_PSP_slot_delta,
REG_R2, false);
regSet.verifyRegUsed(REG_R1);
// Store the PSP value (aka CallerSP)
genInstrWithConstant(INS_str, EA_PTRSIZE, REG_R1, REG_SPBASE, genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2,
false);
// re-establish the frame pointer
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_FPBASE, REG_R1,
genFuncletInfo.fiFunction_CallerSP_to_FP_delta, REG_R2, false);
}
else // This is a non-filter funclet
{
// X3 is scratch, X2 can also become scratch
// compute the CallerSP, given the frame pointer. x3 is scratch.
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_R3, REG_FPBASE,
-genFuncletInfo.fiFunction_CallerSP_to_FP_delta, REG_R2, false);
regSet.verifyRegUsed(REG_R3);
genInstrWithConstant(INS_str, EA_PTRSIZE, REG_R3, REG_SPBASE, genFuncletInfo.fiSP_to_PSP_slot_delta, REG_R2,
false);
}
}
}
/*****************************************************************************
*
* Generates code for an EH funclet epilog.
*/
void CodeGen::genFuncletEpilog()
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFuncletEpilog()\n");
#endif
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
bool unwindStarted = false;
if (!unwindStarted)
{
// We can delay this until we know we'll generate an unwindable instruction, if necessary.
compiler->unwindBegEpilog();
unwindStarted = true;
}
regMaskTP maskRestoreRegsFloat = genFuncletInfo.fiSaveRegs & RBM_ALLFLOAT;
regMaskTP maskRestoreRegsInt = genFuncletInfo.fiSaveRegs & ~maskRestoreRegsFloat;
// Funclets must always save LR and FP, since when we have funclets we must have an FP frame.
assert((maskRestoreRegsInt & RBM_LR) != 0);
assert((maskRestoreRegsInt & RBM_FP) != 0);
if ((genFuncletInfo.fiFrameType == 3) || (genFuncletInfo.fiFrameType == 5))
{
// Note that genFuncletInfo.fiSpDelta2 is always a non-positive value
assert(genFuncletInfo.fiSpDelta2 <= 0);
// generate add SP,SP,imm
if (genFuncletInfo.fiSpDelta2 < 0)
{
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta2, REG_R2, nullptr, /* reportUnwindData */ true);
}
else
{
            // We should only see a zero SpDelta2 with OSR.
assert(compiler->opts.IsOSR());
}
}
regMaskTP regsToRestoreMask = maskRestoreRegsInt | maskRestoreRegsFloat;
if ((genFuncletInfo.fiFrameType == 1) || (genFuncletInfo.fiFrameType == 2) || (genFuncletInfo.fiFrameType == 3))
{
regsToRestoreMask &= ~(RBM_LR | RBM_FP); // We restore FP/LR at the end
}
int lowestCalleeSavedOffset = genFuncletInfo.fiSP_to_CalleeSave_delta + genFuncletInfo.fiSpDelta2;
genRestoreCalleeSavedRegistersHelp(regsToRestoreMask, lowestCalleeSavedOffset, 0);
if (genFuncletInfo.fiFrameType == 1)
{
// With OSR we may see large values for fiSpDelta1
//
if (compiler->opts.IsOSR())
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
assert(genFuncletInfo.fiSpDelta2 == 0);
assert(genFuncletInfo.fiSP_to_FPLR_save_delta == 0);
}
else if (genFuncletInfo.fiFrameType == 2)
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
genFuncletInfo.fiSP_to_FPLR_save_delta);
compiler->unwindSaveRegPair(REG_FP, REG_LR, genFuncletInfo.fiSP_to_FPLR_save_delta);
// fiFrameType==2 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate add SP,SP,imm
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
}
else if (genFuncletInfo.fiFrameType == 3)
{
// With OSR we may see large values for fiSpDelta1
//
if (compiler->opts.IsOSR())
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, 0);
compiler->unwindSaveRegPair(REG_FP, REG_LR, 0);
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -genFuncletInfo.fiSpDelta1,
INS_OPTS_POST_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, genFuncletInfo.fiSpDelta1);
}
}
else if (genFuncletInfo.fiFrameType == 4)
{
// fiFrameType==4 constraints:
assert(genFuncletInfo.fiSpDelta1 < 0);
assert(genFuncletInfo.fiSpDelta1 >= -512);
// generate add SP,SP,imm
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
assert(genFuncletInfo.fiSpDelta2 == 0);
}
else
{
assert(genFuncletInfo.fiFrameType == 5);
// Same work as fiFrameType==4, but different asserts.
assert(genFuncletInfo.fiSpDelta1 < 0);
// With OSR we may see large values for fiSpDelta1 as the funclet
// frame currently must pad with the Tier0 frame size.
//
if (compiler->opts.IsOSR())
{
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_SCRATCH, nullptr, /* reportUnwindData */ true);
}
else
{
// generate add SP,SP,imm
assert(genFuncletInfo.fiSpDelta1 >= -240);
genStackPointerAdjustment(-genFuncletInfo.fiSpDelta1, REG_NA, nullptr, /* reportUnwindData */ true);
}
}
inst_RV(INS_ret, REG_LR, TYP_I_IMPL);
compiler->unwindReturn(REG_LR);
compiler->unwindEndEpilog();
}
/*****************************************************************************
*
* Capture the information used to generate the funclet prologs and epilogs.
* Note that all funclet prologs are identical, and all funclet epilogs are
* identical (per type: filters are identical, and non-filters are identical).
* Thus, we compute the data used for these just once.
*
* See genFuncletProlog() for more information about the prolog/epilog sequences.
*/
void CodeGen::genCaptureFuncletPrologEpilogInfo()
{
if (!compiler->ehAnyFunclets())
return;
assert(isFramePointerUsed());
// The frame size and offsets must be finalized
assert(compiler->lvaDoneFrameLayout == Compiler::FINAL_FRAME_LAYOUT);
unsigned const PSPSize = (compiler->lvaPSPSym != BAD_VAR_NUM) ? REGSIZE_BYTES : 0;
// Because a method and funclets must have the same caller-relative PSPSym offset,
// if there is a PSPSym, we have to pad the funclet frame size for OSR.
//
unsigned osrPad = 0;
if (compiler->opts.IsOSR() && (PSPSize > 0))
{
osrPad = compiler->info.compPatchpointInfo->TotalFrameSize();
}
genFuncletInfo.fiFunction_CallerSP_to_FP_delta = genCallerSPtoFPdelta() - osrPad;
regMaskTP rsMaskSaveRegs = regSet.rsMaskCalleeSaved;
assert((rsMaskSaveRegs & RBM_LR) != 0);
assert((rsMaskSaveRegs & RBM_FP) != 0);
unsigned saveRegsCount = genCountBits(rsMaskSaveRegs);
unsigned saveRegsPlusPSPSize = saveRegsCount * REGSIZE_BYTES + PSPSize;
if (compiler->info.compIsVarArgs)
{
// For varargs we always save all of the integer register arguments
// so that they are contiguous with the incoming stack arguments.
saveRegsPlusPSPSize += MAX_REG_ARG * REGSIZE_BYTES;
}
unsigned const saveRegsPlusPSPSizeAligned = roundUp(saveRegsPlusPSPSize, STACK_ALIGN);
assert(compiler->lvaOutgoingArgSpaceSize % REGSIZE_BYTES == 0);
unsigned const outgoingArgSpaceAligned = roundUp(compiler->lvaOutgoingArgSpaceSize, STACK_ALIGN);
unsigned const maxFuncletFrameSizeAligned = saveRegsPlusPSPSizeAligned + osrPad + outgoingArgSpaceAligned;
assert((maxFuncletFrameSizeAligned % STACK_ALIGN) == 0);
int SP_to_FPLR_save_delta;
int SP_to_PSP_slot_delta;
int CallerSP_to_PSP_slot_delta;
unsigned const funcletFrameSize = saveRegsPlusPSPSize + osrPad + compiler->lvaOutgoingArgSpaceSize;
unsigned const funcletFrameSizeAligned = roundUp(funcletFrameSize, STACK_ALIGN);
assert(funcletFrameSizeAligned <= maxFuncletFrameSizeAligned);
unsigned const funcletFrameAlignmentPad = funcletFrameSizeAligned - funcletFrameSize;
assert((funcletFrameAlignmentPad == 0) || (funcletFrameAlignmentPad == REGSIZE_BYTES));
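    // Frames that fit in 512 bytes (frame types 1, 2 and 4) are established with a single SP adjustment;
    // larger frames (frame types 3 and 5) split the adjustment in two, as described in the comment above
    // genFuncletProlog().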
if (maxFuncletFrameSizeAligned <= 512)
{
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
SP_to_FPLR_save_delta = funcletFrameSizeAligned - (2 /* FP, LR */ * REGSIZE_BYTES);
if (compiler->info.compIsVarArgs)
{
SP_to_FPLR_save_delta -= MAX_REG_ARG * REGSIZE_BYTES;
}
SP_to_PSP_slot_delta = compiler->lvaOutgoingArgSpaceSize + funcletFrameAlignmentPad + osrPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSize);
genFuncletInfo.fiFrameType = 4;
}
else
{
SP_to_FPLR_save_delta = compiler->lvaOutgoingArgSpaceSize;
SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + funcletFrameAlignmentPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSize - 2 /* FP, LR */ * REGSIZE_BYTES);
if (compiler->lvaOutgoingArgSpaceSize == 0)
{
genFuncletInfo.fiFrameType = 1;
}
else
{
genFuncletInfo.fiFrameType = 2;
}
}
genFuncletInfo.fiSpDelta1 = -(int)funcletFrameSizeAligned;
genFuncletInfo.fiSpDelta2 = 0;
assert(genFuncletInfo.fiSpDelta1 + genFuncletInfo.fiSpDelta2 == -(int)funcletFrameSizeAligned);
}
else
{
unsigned saveRegsPlusPSPAlignmentPad = saveRegsPlusPSPSizeAligned - saveRegsPlusPSPSize;
assert((saveRegsPlusPSPAlignmentPad == 0) || (saveRegsPlusPSPAlignmentPad == REGSIZE_BYTES));
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
SP_to_FPLR_save_delta = funcletFrameSizeAligned - (2 /* FP, LR */ * REGSIZE_BYTES);
if (compiler->info.compIsVarArgs)
{
SP_to_FPLR_save_delta -= MAX_REG_ARG * REGSIZE_BYTES;
}
SP_to_PSP_slot_delta =
compiler->lvaOutgoingArgSpaceSize + funcletFrameAlignmentPad + saveRegsPlusPSPAlignmentPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSize);
genFuncletInfo.fiFrameType = 5;
}
else
{
SP_to_FPLR_save_delta = outgoingArgSpaceAligned;
SP_to_PSP_slot_delta = SP_to_FPLR_save_delta + 2 /* FP, LR */ * REGSIZE_BYTES + saveRegsPlusPSPAlignmentPad;
CallerSP_to_PSP_slot_delta = -(int)(osrPad + saveRegsPlusPSPSizeAligned - 2 /* FP, LR */ * REGSIZE_BYTES -
saveRegsPlusPSPAlignmentPad);
genFuncletInfo.fiFrameType = 3;
}
genFuncletInfo.fiSpDelta1 = -(int)(osrPad + saveRegsPlusPSPSizeAligned);
genFuncletInfo.fiSpDelta2 = -(int)outgoingArgSpaceAligned;
assert(genFuncletInfo.fiSpDelta1 + genFuncletInfo.fiSpDelta2 == -(int)maxFuncletFrameSizeAligned);
}
/* Now save it for future use */
genFuncletInfo.fiSaveRegs = rsMaskSaveRegs;
genFuncletInfo.fiSP_to_FPLR_save_delta = SP_to_FPLR_save_delta;
genFuncletInfo.fiSP_to_PSP_slot_delta = SP_to_PSP_slot_delta;
genFuncletInfo.fiSP_to_CalleeSave_delta = SP_to_PSP_slot_delta + PSPSize;
genFuncletInfo.fiCallerSP_to_PSP_slot_delta = CallerSP_to_PSP_slot_delta;
#ifdef DEBUG
if (verbose)
{
printf("\n");
printf("Funclet prolog / epilog info\n");
printf(" Save regs: ");
dspRegMask(genFuncletInfo.fiSaveRegs);
printf("\n");
if (compiler->opts.IsOSR())
{
printf(" OSR Pad: %d\n", osrPad);
}
printf(" SP to FP/LR save location delta: %d\n", genFuncletInfo.fiSP_to_FPLR_save_delta);
printf(" SP to PSP slot delta: %d\n", genFuncletInfo.fiSP_to_PSP_slot_delta);
printf(" SP to callee-saved area delta: %d\n", genFuncletInfo.fiSP_to_CalleeSave_delta);
printf(" Caller SP to PSP slot delta: %d\n", genFuncletInfo.fiCallerSP_to_PSP_slot_delta);
printf(" Frame type: %d\n", genFuncletInfo.fiFrameType);
printf(" SP delta 1: %d\n", genFuncletInfo.fiSpDelta1);
printf(" SP delta 2: %d\n", genFuncletInfo.fiSpDelta2);
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
if (CallerSP_to_PSP_slot_delta !=
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)) // for debugging
{
printf("lvaGetCallerSPRelativeOffset(lvaPSPSym): %d\n",
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym));
}
}
}
assert(genFuncletInfo.fiSP_to_FPLR_save_delta >= 0);
assert(genFuncletInfo.fiSP_to_PSP_slot_delta >= 0);
assert(genFuncletInfo.fiSP_to_CalleeSave_delta >= 0);
assert(genFuncletInfo.fiCallerSP_to_PSP_slot_delta <= 0);
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
assert(genFuncletInfo.fiCallerSP_to_PSP_slot_delta ==
compiler->lvaGetCallerSPRelativeOffset(compiler->lvaPSPSym)); // same offset used in main function and
// funclet!
}
#endif // DEBUG
}
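//-----------------------------------------------------------------------------
// genSetPSPSym: Set the PSPSym value in the main function prolog.
//
// Arguments:
//    initReg        - An available scratch register; it is clobbered here, so *pInitRegZeroed is set to 'false'.
//    pInitRegZeroed - OUT parameter; see above.
//
// Notes:
//    Does nothing if the function has no PSPSym. Otherwise, computes CallerSP from the current SP
//    (adding the Tier0 frame size for OSR methods) and stores it to the PSPSym stack slot.
//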
void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (compiler->lvaPSPSym == BAD_VAR_NUM)
{
return;
}
noway_assert(isFramePointerUsed()); // We need an explicit frame pointer
int SPtoCallerSPdelta = -genCallerSPtoInitialSPdelta();
if (compiler->opts.IsOSR())
{
SPtoCallerSPdelta += compiler->info.compPatchpointInfo->TotalFrameSize();
}
// We will just use the initReg since it is an available register
// and we are probably done using it anyway...
regNumber regTmp = initReg;
*pInitRegZeroed = false;
GetEmitter()->emitIns_R_R_Imm(INS_add, EA_PTRSIZE, regTmp, REG_SPBASE, SPtoCallerSPdelta);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
}
//-----------------------------------------------------------------------------
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
// `genUseBlockInit` is set.
//
// Arguments:
// untrLclHi - (Untracked locals High-Offset) The upper bound offset at which the zero init
// code will end initializing memory (not inclusive).
// untrLclLo - (Untracked locals Low-Offset) The lower bound at which the zero init code will
// start zero initializing memory.
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
void CodeGen::genZeroInitFrameUsingBlockInit(int untrLclHi, int untrLclLo, regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
assert(genUseBlockInit);
assert(untrLclHi > untrLclLo);
int bytesToWrite = untrLclHi - untrLclLo;
const regNumber zeroSimdReg = REG_ZERO_INIT_FRAME_SIMD;
bool simdRegZeroed = false;
const int simdRegPairSizeBytes = 2 * FP_REGSIZE_BYTES;
regNumber addrReg = REG_ZERO_INIT_FRAME_REG1;
if (addrReg == initReg)
{
*pInitRegZeroed = false;
}
int addrOffset = 0;
// The following invariants are held below:
//
// 1) [addrReg, #addrOffset] points at a location where next chunk of zero bytes will be written;
// 2) bytesToWrite specifies the number of bytes on the frame to initialize;
// 3) if simdRegZeroed is true then 128-bit wide zeroSimdReg contains zeroes.
const int bytesUseZeroingLoop = 192;
if (bytesToWrite >= bytesUseZeroingLoop)
{
// Generates the following code:
//
// When the size of the region is greater than or equal to 256 bytes
// **and** DC ZVA instruction use is permitted
// **and** the instruction block size is configured to 64 bytes:
//
// movi v16.16b, #0
// add x9, fp, #(untrLclLo+64)
// add x10, fp, #(untrLclHi-64)
// stp q16, q16, [x9, #-64]
// stp q16, q16, [x9, #-32]
// bfm x9, xzr, #0, #5
//
// loop:
// dc zva, x9
// add x9, x9, #64
// cmp x9, x10
// blo loop
//
// stp q16, q16, [x10]
// stp q16, q16, [x10, #32]
//
// Otherwise:
//
// movi v16.16b, #0
// add x9, fp, #(untrLclLo-32)
// mov x10, #(bytesToWrite-64)
//
// loop:
// stp q16, q16, [x9, #32]
// stp q16, q16, [x9, #64]!
// subs x10, x10, #64
// bge loop
const int bytesUseDataCacheZeroInstruction = 256;
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, zeroSimdReg, 0, INS_OPTS_16B);
simdRegZeroed = true;
if ((bytesToWrite >= bytesUseDataCacheZeroInstruction) &&
compiler->compOpportunisticallyDependsOn(InstructionSet_Dczva))
{
// The first and the last 64 bytes should be written with two stp q-reg instructions.
// This is in order to avoid **unintended** zeroing of the data by dc zva
// outside of [fp+untrLclLo, fp+untrLclHi) memory region.
genInstrWithConstant(INS_add, EA_PTRSIZE, addrReg, genFramePointerReg(), untrLclLo + 64, addrReg);
addrOffset = -64;
const regNumber endAddrReg = REG_ZERO_INIT_FRAME_REG2;
if (endAddrReg == initReg)
{
*pInitRegZeroed = false;
}
genInstrWithConstant(INS_add, EA_PTRSIZE, endAddrReg, genFramePointerReg(), untrLclHi - 64, endAddrReg);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, addrOffset);
addrOffset += simdRegPairSizeBytes;
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, addrOffset);
addrOffset += simdRegPairSizeBytes;
assert(addrOffset == 0);
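            // bfm with xzr as the source clears bits [5:0] of addrReg, aligning it down to the assumed
            // 64-byte DC ZVA block size.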
GetEmitter()->emitIns_R_R_I_I(INS_bfm, EA_PTRSIZE, addrReg, REG_ZR, 0, 5);
// addrReg points at the beginning of a cache line.
GetEmitter()->emitIns_R(INS_dczva, EA_PTRSIZE, addrReg);
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, addrReg, addrReg, 64);
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, addrReg, endAddrReg);
GetEmitter()->emitIns_J(INS_blo, NULL, -4);
addrReg = endAddrReg;
bytesToWrite = 64;
}
else
{
genInstrWithConstant(INS_add, EA_PTRSIZE, addrReg, genFramePointerReg(), untrLclLo - 32, addrReg);
addrOffset = 32;
const regNumber countReg = REG_ZERO_INIT_FRAME_REG2;
if (countReg == initReg)
{
*pInitRegZeroed = false;
}
instGen_Set_Reg_To_Imm(EA_PTRSIZE, countReg, bytesToWrite - 64);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, 32);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, 64,
INS_OPTS_PRE_INDEX);
GetEmitter()->emitIns_R_R_I(INS_subs, EA_PTRSIZE, countReg, countReg, 64);
GetEmitter()->emitIns_J(INS_bge, NULL, -4);
bytesToWrite %= 64;
}
}
else
{
genInstrWithConstant(INS_add, EA_PTRSIZE, addrReg, genFramePointerReg(), untrLclLo, addrReg);
}
if (bytesToWrite >= simdRegPairSizeBytes)
{
// Generates the following code:
//
// movi v16.16b, #0
// stp q16, q16, [x9, #addrOffset]
// stp q16, q16, [x9, #(addrOffset+32)]
// ...
// stp q16, q16, [x9, #(addrOffset+roundDown(bytesToWrite, 32))]
if (!simdRegZeroed)
{
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, zeroSimdReg, 0, INS_OPTS_16B);
simdRegZeroed = true;
}
for (; bytesToWrite >= simdRegPairSizeBytes; bytesToWrite -= simdRegPairSizeBytes)
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_16BYTE, zeroSimdReg, zeroSimdReg, addrReg, addrOffset);
addrOffset += simdRegPairSizeBytes;
}
}
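    // Zero any remaining tail with the zero register: a 16-byte pair store, then an 8-byte store,
    // and finally a 4-byte store, as needed.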
const int regPairSizeBytes = 2 * REGSIZE_BYTES;
if (bytesToWrite >= regPairSizeBytes)
{
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, addrReg, addrOffset);
addrOffset += regPairSizeBytes;
bytesToWrite -= regPairSizeBytes;
}
if (bytesToWrite >= REGSIZE_BYTES)
{
GetEmitter()->emitIns_R_R_I(INS_str, EA_PTRSIZE, REG_ZR, addrReg, addrOffset);
addrOffset += REGSIZE_BYTES;
bytesToWrite -= REGSIZE_BYTES;
}
if (bytesToWrite == sizeof(int))
{
GetEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, REG_ZR, addrReg, addrOffset);
bytesToWrite = 0;
}
assert(bytesToWrite == 0);
}
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX End Prolog / Epilog XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
{
// Generate a call to the finally, like this:
// mov x0,qword ptr [fp + 10H] / sp // Load x0 with PSPSym, or sp if PSPSym is not used
// bl finally-funclet
// b finally-return // Only for non-retless finally calls
// The 'b' can be a NOP if we're going to the next block.
if (compiler->lvaPSPSym != BAD_VAR_NUM)
{
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, REG_R0, compiler->lvaPSPSym, 0);
}
else
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_R0, REG_SPBASE, /* canSkip */ false);
}
GetEmitter()->emitIns_J(INS_bl_local, block->bbJumpDest);
if (block->bbFlags & BBF_RETLESS_CALL)
{
// We have a retless call, and the last instruction generated was a call.
// If the next block is in a different EH region (or is the end of the code
// block), then we need to generate a breakpoint here (since it will never
// get executed) to get proper unwind behavior.
if ((block->bbNext == nullptr) || !BasicBlock::sameEHRegion(block, block->bbNext))
{
instGen(INS_BREAKPOINT); // This should never get executed
}
}
else
{
// Because of the way the flowgraph is connected, the liveness info for this one instruction
        // after the call is not (cannot be) correct in cases where a variable has a last use in the
// handler. So turn off GC reporting for this single instruction.
GetEmitter()->emitDisableGC();
// Now go to where the finally funclet needs to return to.
if (block->bbNext->bbJumpDest == block->bbNext->bbNext)
{
// Fall-through.
// TODO-ARM64-CQ: Can we get rid of this instruction, and just have the call return directly
// to the next instruction? This would depend on stack walking from within the finally
// handler working without this instruction being in this special EH region.
instGen(INS_nop);
}
else
{
inst_JMP(EJ_jmp, block->bbNext->bbJumpDest);
}
GetEmitter()->emitEnableGC();
}
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
// jump target using bbJumpDest - that is already used to point
// to the finally block. So just skip past the BBJ_ALWAYS unless the
// block is RETLESS.
if (!(block->bbFlags & BBF_RETLESS_CALL))
{
assert(block->isBBCallAlwaysPair());
block = block->bbNext;
}
return block;
}
void CodeGen::genEHCatchRet(BasicBlock* block)
{
// For long address (default): `adrp + add` will be emitted.
// For short address (proven later): `adr` will be emitted.
GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, block->bbJumpDest, REG_INTRET);
}
// move an immediate value into an integer register
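// Relocatable immediates are emitted as an adrp/add pair with fix-ups; zero goes through
// instGen_Set_Reg_To_Zero; an immediate encodable in a single mov uses that; otherwise a movz or movn
// (whichever needs fewer instructions) is emitted, followed by a movk for each remaining non-trivial
// 16-bit halfword.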
void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size,
regNumber reg,
ssize_t imm,
insFlags flags DEBUGARG(size_t targetHandle) DEBUGARG(GenTreeFlags gtFlags))
{
// reg cannot be a FP register
assert(!genIsValidFloatReg(reg));
if (!compiler->opts.compReloc)
{
size = EA_SIZE(size); // Strip any Reloc flags from size if we aren't doing relocs
}
if (EA_IS_RELOC(size))
{
// This emits a pair of adrp/add (two instructions) with fix-ups.
GetEmitter()->emitIns_R_AI(INS_adrp, size, reg, imm DEBUGARG(targetHandle) DEBUGARG(gtFlags));
}
else if (imm == 0)
{
instGen_Set_Reg_To_Zero(size, reg, flags);
}
else
{
if (emitter::emitIns_valid_imm_for_mov(imm, size))
{
GetEmitter()->emitIns_R_I(INS_mov, size, reg, imm);
}
else
{
// Arm64 allows any arbitrary 16-bit constant to be loaded into a register halfword
// There are three forms
// movk which loads into any halfword preserving the remaining halfwords
// movz which loads into any halfword zeroing the remaining halfwords
// movn which loads into any halfword zeroing the remaining halfwords then bitwise inverting the register
// In some cases it is preferable to use movn, because it has the side effect of filling the other halfwords
// with ones
// Determine whether movn or movz will require the fewest instructions to populate the immediate
int preferMovn = 0;
for (int i = (size == EA_8BYTE) ? 48 : 16; i >= 0; i -= 16)
{
if (uint16_t(imm >> i) == 0xffff)
++preferMovn; // a single movk 0xffff could be skipped if movn was used
else if (uint16_t(imm >> i) == 0x0000)
--preferMovn; // a single movk 0 could be skipped if movz was used
}
// Select the first instruction. Any additional instruction will use movk
instruction ins = (preferMovn > 0) ? INS_movn : INS_movz;
// Initial movz or movn will fill the remaining bytes with the skipVal
// This can allow skipping filling a halfword
uint16_t skipVal = (preferMovn > 0) ? 0xffff : 0;
unsigned bits = (size == EA_8BYTE) ? 64 : 32;
// Iterate over imm examining 16 bits at a time
for (unsigned i = 0; i < bits; i += 16)
{
uint16_t imm16 = uint16_t(imm >> i);
if (imm16 != skipVal)
{
if (ins == INS_movn)
{
// For the movn case, we need to bitwise invert the immediate. This is because
// (movn x0, ~imm16) === (movz x0, imm16; or x0, x0, #0xffff`ffff`ffff`0000)
imm16 = ~imm16;
}
GetEmitter()->emitIns_R_I_I(ins, size, reg, imm16, i, INS_OPTS_LSL);
// Once the initial movz/movn is emitted the remaining instructions will all use movk
ins = INS_movk;
}
}
// We must emit a movn or movz or we have not done anything
// The cases which hit this assert should be (emitIns_valid_imm_for_mov() == true) and
// should not be in this else condition
assert(ins == INS_movk);
}
// The caller may have requested that the flags be set on this mov (rarely/never)
if (flags == INS_FLAGS_SET)
{
GetEmitter()->emitIns_R_I(INS_tst, size, reg, 0);
}
}
regSet.verifyRegUsed(reg);
}
/***********************************************************************************
*
* Generate code to set a register 'targetReg' of type 'targetType' to the constant
* specified by the constant (GT_CNS_INT or GT_CNS_DBL) in 'tree'. This does not call
* genProduceReg() on the target register.
*/
void CodeGen::genSetRegToConst(regNumber targetReg, var_types targetType, GenTree* tree)
{
switch (tree->gtOper)
{
case GT_CNS_INT:
{
GenTreeIntConCommon* con = tree->AsIntConCommon();
ssize_t cnsVal = con->IconValue();
emitAttr attr = emitActualTypeSize(targetType);
// TODO-CQ: Currently we cannot do this for all handles because of
// https://github.com/dotnet/runtime/issues/60712
if (con->ImmedValNeedsReloc(compiler))
{
attr = EA_SET_FLG(attr, EA_CNS_RELOC_FLG);
}
if (targetType == TYP_BYREF)
{
attr = EA_SET_FLG(attr, EA_BYREF_FLG);
}
instGen_Set_Reg_To_Imm(attr, targetReg, cnsVal,
INS_FLAGS_DONT_CARE DEBUGARG(tree->AsIntCon()->gtTargetHandle)
DEBUGARG(tree->AsIntCon()->gtFlags));
regSet.verifyRegUsed(targetReg);
}
break;
case GT_CNS_DBL:
{
emitter* emit = GetEmitter();
emitAttr size = emitActualTypeSize(tree);
double constValue = tree->AsDblCon()->gtDconVal;
// Make sure we use "movi reg, 0x00" only for positive zero (0.0) and not for negative zero (-0.0)
if (*(__int64*)&constValue == 0)
{
// A faster/smaller way to generate 0.0
// We will just zero out the entire vector register for both float and double
emit->emitIns_R_I(INS_movi, EA_16BYTE, targetReg, 0x00, INS_OPTS_16B);
}
else if (emitter::emitIns_valid_imm_for_fmov(constValue))
{
// We can load the FP constant using the fmov FP-immediate for this constValue
emit->emitIns_R_F(INS_fmov, size, targetReg, constValue);
}
else
{
// Get a temp integer register to compute long address.
regNumber addrReg = tree->GetSingleTempReg();
// We must load the FP constant from the constant pool
// Emit a data section constant for the float or double constant.
CORINFO_FIELD_HANDLE hnd = emit->emitFltOrDblConst(constValue, size);
// For long address (default): `adrp + ldr + fmov` will be emitted.
// For short address (proven later), `ldr` will be emitted.
emit->emitIns_R_C(INS_ldr, size, targetReg, addrReg, hnd, 0);
}
}
break;
default:
unreached();
}
}
// Produce code for a GT_INC_SATURATE node.
void CodeGen::genCodeForIncSaturate(GenTree* tree)
{
regNumber targetReg = tree->GetRegNum();
// The arithmetic node must be sitting in a register (since it's not contained)
assert(!tree->isContained());
// The dst can only be a register.
assert(targetReg != REG_NA);
GenTree* operand = tree->gtGetOp1();
assert(!operand->isContained());
// The src must be a register.
regNumber operandReg = genConsumeReg(operand);
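// 'adds' sets the carry flag when the increment wraps the value to zero; 'cinv' then inverts the
// wrapped result (0 -> all ones) so the value saturates at the maximum unsigned value.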
GetEmitter()->emitIns_R_R_I(INS_adds, emitActualTypeSize(tree), targetReg, operandReg, 1);
GetEmitter()->emitIns_R_R_COND(INS_cinv, emitActualTypeSize(tree), targetReg, targetReg, INS_COND_HS);
genProduceReg(tree);
}
// Generate code to get the high N bits of a N*N=2N bit multiplication result
void CodeGen::genCodeForMulHi(GenTreeOp* treeNode)
{
assert(!treeNode->gtOverflowEx());
genConsumeOperands(treeNode);
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
emitAttr attr = emitActualTypeSize(treeNode);
unsigned isUnsigned = (treeNode->gtFlags & GTF_UNSIGNED);
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
assert(!varTypeIsFloating(targetType));
// The arithmetic node must be sitting in a register (since it's not contained)
assert(targetReg != REG_NA);
if (EA_SIZE(attr) == EA_8BYTE)
{
instruction ins = isUnsigned ? INS_umulh : INS_smulh;
regNumber r = emit->emitInsTernary(ins, attr, treeNode, op1, op2);
assert(r == targetReg);
}
else
{
assert(EA_SIZE(attr) == EA_4BYTE);
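// umull/smull produce the full 64-bit product; shifting right by 32 (arithmetic shift for the
// signed case) leaves the high 32 bits of the product in targetReg.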
instruction ins = isUnsigned ? INS_umull : INS_smull;
regNumber r = emit->emitInsTernary(ins, EA_4BYTE, treeNode, op1, op2);
emit->emitIns_R_R_I(isUnsigned ? INS_lsr : INS_asr, EA_8BYTE, targetReg, targetReg, 32);
}
genProduceReg(treeNode);
}
// Generate code for ADD, SUB, MUL, DIV, UDIV, AND, AND_NOT, OR and XOR
// The caller is expected to have called genConsumeOperands() before calling this method.
void CodeGen::genCodeForBinary(GenTreeOp* treeNode)
{
const genTreeOps oper = treeNode->OperGet();
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
assert(treeNode->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_DIV, GT_UDIV, GT_AND, GT_AND_NOT, GT_OR, GT_XOR));
GenTree* op1 = treeNode->gtGetOp1();
GenTree* op2 = treeNode->gtGetOp2();
instruction ins = genGetInsForOper(treeNode->OperGet(), targetType);
if ((treeNode->gtFlags & GTF_SET_FLAGS) != 0)
{
switch (oper)
{
case GT_ADD:
ins = INS_adds;
break;
case GT_SUB:
ins = INS_subs;
break;
case GT_AND:
ins = INS_ands;
break;
case GT_AND_NOT:
ins = INS_bics;
break;
default:
noway_assert(!"Unexpected BinaryOp with GTF_SET_FLAGS set");
}
}
// The arithmetic node must be sitting in a register (since it's not contained)
assert(targetReg != REG_NA);
regNumber r = emit->emitInsTernary(ins, emitActualTypeSize(treeNode), treeNode, op1, op2);
assert(r == targetReg);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForLclVar: Produce code for a GT_LCL_VAR node.
//
// Arguments:
// tree - the GT_LCL_VAR node
//
void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
{
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
var_types targetType = varDsc->GetRegisterType(tree);
bool isRegCandidate = varDsc->lvIsRegCandidate();
// lcl_vars are not defs
assert((tree->gtFlags & GTF_VAR_DEF) == 0);
// If this is a register candidate that has been spilled, genConsumeReg() will
// reload it at the point of use. Otherwise, if it's not in a register, we load it here.
if (!isRegCandidate && !tree->IsMultiReg() && !(tree->gtFlags & GTF_SPILLED))
{
// targetType must be a normal scalar type and not a TYP_STRUCT
assert(targetType != TYP_STRUCT);
instruction ins = ins_Load(targetType);
emitAttr attr = emitActualTypeSize(targetType);
emitter* emit = GetEmitter();
emit->emitIns_R_S(ins, attr, tree->GetRegNum(), varNum, 0);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForStoreLclFld: Produce code for a GT_STORE_LCL_FLD node.
//
// Arguments:
// tree - the GT_STORE_LCL_FLD node
//
void CodeGen::genCodeForStoreLclFld(GenTreeLclFld* tree)
{
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
noway_assert(targetType != TYP_STRUCT);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreLclTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
// record the offset
unsigned offset = tree->GetLclOffs();
// We must have a stack store with GT_STORE_LCL_FLD
noway_assert(targetReg == REG_NA);
unsigned varNum = tree->GetLclNum();
LclVarDsc* varDsc = compiler->lvaGetDesc(varNum);
// Ensure that lclVar nodes are typed correctly.
assert(!varDsc->lvNormalizeOnStore() || targetType == genActualType(varDsc->TypeGet()));
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
regNumber dataReg = REG_NA;
if (data->isContainedIntOrIImmed())
{
assert(data->IsIntegralConst(0));
dataReg = REG_ZR;
}
else if (data->isContained())
{
assert(data->OperIs(GT_BITCAST));
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
else
{
assert(!data->isContained());
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emitAttr attr = emitActualTypeSize(targetType);
emit->emitIns_S_R(ins, attr, dataReg, varNum, offset);
genUpdateLife(tree);
varDsc->SetRegNum(REG_STK);
}
//------------------------------------------------------------------------
// genCodeForStoreLclVar: Produce code for a GT_STORE_LCL_VAR node.
//
// Arguments:
// lclNode - the GT_STORE_LCL_VAR node
//
void CodeGen::genCodeForStoreLclVar(GenTreeLclVar* lclNode)
{
GenTree* data = lclNode->gtOp1;
// Stores from a multi-reg source are handled separately.
if (data->gtSkipReloadOrCopy()->IsMultiRegNode())
{
genMultiRegStoreToLocal(lclNode);
return;
}
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
if (lclNode->IsMultiReg())
{
// This is the case of storing to a multi-reg HFA local from a fixed-size SIMD type.
assert(varTypeIsSIMD(data) && varDsc->lvIsHfa() && (varDsc->GetHfaType() == TYP_FLOAT));
regNumber operandReg = genConsumeReg(data);
unsigned int regCount = varDsc->lvFieldCnt;
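// 'dup' (element form) copies element 'i' of the SIMD source register into the register
// assigned to the i-th HFA field.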
for (unsigned i = 0; i < regCount; ++i)
{
regNumber varReg = lclNode->GetRegByIndex(i);
assert(varReg != REG_NA);
unsigned fieldLclNum = varDsc->lvFieldLclStart + i;
LclVarDsc* fieldVarDsc = compiler->lvaGetDesc(fieldLclNum);
assert(fieldVarDsc->TypeGet() == TYP_FLOAT);
GetEmitter()->emitIns_R_R_I(INS_dup, emitTypeSize(TYP_FLOAT), varReg, operandReg, i);
}
genProduceReg(lclNode);
}
else
{
regNumber targetReg = lclNode->GetRegNum();
emitter* emit = GetEmitter();
unsigned varNum = lclNode->GetLclNum();
var_types targetType = varDsc->GetRegisterType(lclNode);
#ifdef FEATURE_SIMD
// storing of TYP_SIMD12 (i.e. Vector3) field
if (targetType == TYP_SIMD12)
{
genStoreLclTypeSIMD12(lclNode);
return;
}
#endif // FEATURE_SIMD
genConsumeRegs(data);
regNumber dataReg = REG_NA;
if (data->isContained())
{
// This is only possible for a zero-init or bitcast.
const bool zeroInit = (data->IsIntegralConst(0) || data->IsSIMDZero());
assert(zeroInit || data->OperIs(GT_BITCAST));
if (zeroInit && varTypeIsSIMD(targetType))
{
if (targetReg != REG_NA)
{
emit->emitIns_R_I(INS_movi, emitActualTypeSize(targetType), targetReg, 0x00, INS_OPTS_16B);
genProduceReg(lclNode);
}
else
{
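// No target register: zero-initialize the stack slot directly. A single 'stp xzr, xzr'
// writes 16 zero bytes for TYP_SIMD16; a single 'str xzr' writes 8 bytes for TYP_SIMD8.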
if (targetType == TYP_SIMD16)
{
GetEmitter()->emitIns_S_S_R_R(INS_stp, EA_8BYTE, EA_8BYTE, REG_ZR, REG_ZR, varNum, 0);
}
else
{
assert(targetType == TYP_SIMD8);
GetEmitter()->emitIns_S_R(INS_str, EA_8BYTE, REG_ZR, varNum, 0);
}
genUpdateLife(lclNode);
}
return;
}
if (zeroInit)
{
dataReg = REG_ZR;
}
else
{
const GenTree* bitcastSrc = data->AsUnOp()->gtGetOp1();
assert(!bitcastSrc->isContained());
dataReg = bitcastSrc->GetRegNum();
}
}
else
{
assert(!data->isContained());
dataReg = data->GetRegNum();
}
assert(dataReg != REG_NA);
if (targetReg == REG_NA) // store into stack based LclVar
{
inst_set_SV_var(lclNode);
instruction ins = ins_StoreFromSrc(dataReg, targetType);
emitAttr attr = emitActualTypeSize(targetType);
emit->emitIns_S_R(ins, attr, dataReg, varNum, /* offset */ 0);
genUpdateLife(lclNode);
varDsc->SetRegNum(REG_STK);
}
else // store into register (i.e move into register)
{
// Assign into targetReg when dataReg (from op1) is not the same register
inst_Mov(targetType, targetReg, dataReg, /* canSkip */ true);
genProduceReg(lclNode);
}
}
}
//------------------------------------------------------------------------
// genSimpleReturn: Generates code for simple return statement for arm64.
//
// Note: treeNode's and op1's registers are already consumed.
//
// Arguments:
// treeNode - The GT_RETURN or GT_RETFILT tree node with non-struct and non-void type
//
// Return Value:
// None
//
void CodeGen::genSimpleReturn(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_RETURN || treeNode->OperGet() == GT_RETFILT);
GenTree* op1 = treeNode->gtGetOp1();
var_types targetType = treeNode->TypeGet();
assert(targetType != TYP_STRUCT);
assert(targetType != TYP_VOID);
regNumber retReg = varTypeUsesFloatArgReg(treeNode) ? REG_FLOATRET : REG_INTRET;
bool movRequired = (op1->GetRegNum() != retReg);
if (!movRequired)
{
if (op1->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* lcl = op1->AsLclVarCommon();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lcl);
bool isRegCandidate = varDsc->lvIsRegCandidate();
if (isRegCandidate && ((op1->gtFlags & GTF_SPILLED) == 0))
{
// We may need to generate a zero-extending mov instruction to load the value from this GT_LCL_VAR
var_types op1Type = genActualType(op1->TypeGet());
var_types lclType = genActualType(varDsc->TypeGet());
if (genTypeSize(op1Type) < genTypeSize(lclType))
{
movRequired = true;
}
}
}
}
emitAttr attr = emitActualTypeSize(targetType);
GetEmitter()->emitIns_Mov(INS_mov, attr, retReg, op1->GetRegNum(), /* canSkip */ !movRequired);
}
/***********************************************************************************************
* Generate code for localloc
*/
void CodeGen::genLclHeap(GenTree* tree)
{
assert(tree->OperGet() == GT_LCLHEAP);
assert(compiler->compLocallocUsed);
GenTree* size = tree->AsOp()->gtOp1;
noway_assert((genActualType(size->gtType) == TYP_INT) || (genActualType(size->gtType) == TYP_I_IMPL));
regNumber targetReg = tree->GetRegNum();
regNumber regCnt = REG_NA;
regNumber pspSymReg = REG_NA;
var_types type = genActualType(size->gtType);
emitAttr easz = emitTypeSize(type);
BasicBlock* endLabel = nullptr;
BasicBlock* loop = nullptr;
unsigned stackAdjustment = 0;
const target_ssize_t ILLEGAL_LAST_TOUCH_DELTA = (target_ssize_t)-1;
target_ssize_t lastTouchDelta =
ILLEGAL_LAST_TOUCH_DELTA; // The number of bytes from SP to the last stack address probed.
noway_assert(isFramePointerUsed()); // localloc requires Frame Pointer to be established since SP changes
noway_assert(genStackLevel == 0); // Can't have anything on the stack
// Compute the amount of memory to allocate so that it is properly STACK_ALIGN'ed.
size_t amount = 0;
if (size->IsCnsIntOrI())
{
// If size is a constant, then it must be contained.
assert(size->isContained());
// If amount is zero then return null in targetReg
amount = size->AsIntCon()->gtIconVal;
if (amount == 0)
{
instGen_Set_Reg_To_Zero(EA_PTRSIZE, targetReg);
goto BAILOUT;
}
// 'amount' is the total number of bytes to localloc; round it up to STACK_ALIGN
amount = AlignUp(amount, STACK_ALIGN);
}
else
{
// If the size is 0, bail out by returning null in targetReg
genConsumeRegAndCopy(size, targetReg);
endLabel = genCreateTempLabel();
GetEmitter()->emitIns_R_R(INS_tst, easz, targetReg, targetReg);
inst_JMP(EJ_eq, endLabel);
// Compute the size of the block to allocate and perform alignment.
// If compInitMem=true, we can reuse targetReg as regcnt,
// since we don't need any internal registers.
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
inst_Mov(size->TypeGet(), regCnt, targetReg, /* canSkip */ true);
}
// Align to STACK_ALIGN
// regCnt will be the total number of bytes to localloc
inst_RV_IV(INS_add, regCnt, (STACK_ALIGN - 1), emitActualTypeSize(type));
inst_RV_IV(INS_and, regCnt, ~(STACK_ALIGN - 1), emitActualTypeSize(type));
}
// If we have an outgoing arg area then we must adjust the SP by popping off the
// outgoing arg area. We will restore it right before we return from this method.
//
// Localloc returns stack space that aligned to STACK_ALIGN bytes. The following
// are the cases that need to be handled:
// i) Method has out-going arg area.
// It is guaranteed that size of out-going arg area is STACK_ALIGN'ed (see fgMorphArgs).
// Therefore, we will pop off the out-going arg area from the stack pointer before allocating the localloc
// space.
// ii) Method has no out-going arg area.
// Nothing to pop off from the stack.
if (compiler->lvaOutgoingArgSpaceSize > 0)
{
assert((compiler->lvaOutgoingArgSpaceSize % STACK_ALIGN) == 0); // This must be true for the stack to remain
// aligned
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, compiler->lvaOutgoingArgSpaceSize,
rsGetRsvdReg());
stackAdjustment += compiler->lvaOutgoingArgSpaceSize;
}
if (size->IsCnsIntOrI())
{
// We should reach here only for non-zero, constant size allocations.
assert(amount > 0);
const int storePairRegsWritesBytes = 2 * REGSIZE_BYTES;
// For small allocations we will generate up to four stp instructions, to zero 16 to 64 bytes.
static_assert_no_msg(STACK_ALIGN == storePairRegsWritesBytes);
assert(amount % storePairRegsWritesBytes == 0); // stp stores two registers at a time
if (compiler->info.compInitMem)
{
if (amount <= LCLHEAP_UNROLL_LIMIT)
{
// The following zeroes the last 16 bytes and probes the page containing [sp, #16] address.
// stp xzr, xzr, [sp, #-16]!
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SPBASE, -storePairRegsWritesBytes,
INS_OPTS_PRE_INDEX);
if (amount > storePairRegsWritesBytes)
{
// The following sets SP to its final value and zeroes the first 16 bytes of the allocated space.
// stp xzr, xzr, [sp, #-amount+16]!
const ssize_t finalSpDelta = (ssize_t)amount - storePairRegsWritesBytes;
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SPBASE, -finalSpDelta,
INS_OPTS_PRE_INDEX);
// The following zeroes the remaining space in [finalSp+16, initialSp-16) interval
// using a sequence of stp instruction with unsigned offset.
for (ssize_t offset = storePairRegsWritesBytes; offset < finalSpDelta;
offset += storePairRegsWritesBytes)
{
// stp xzr, xzr, [sp, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SPBASE, offset);
}
}
lastTouchDelta = 0;
goto ALLOC_DONE;
}
}
else if (amount < compiler->eeGetPageSize()) // must be < not <=
{
// Since the size is less than a page, simply adjust the SP value.
// The SP might already be in the guard page, so we must touch it BEFORE
// the alloc, not after.
// Note that we check against the lower boundary of the post-index immediate range [-256, 256)
// since the offset is -amount.
const bool canEncodeLoadRegPostIndexOffset = amount <= 256;
if (canEncodeLoadRegPostIndexOffset)
{
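// ldr wzr, [sp], #-amount
// A single post-indexed load both probes the page at the current SP and performs the SP adjustment.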
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, -(ssize_t)amount,
INS_OPTS_POST_INDEX);
}
else if (emitter::canEncodeLoadOrStorePairOffset(-(ssize_t)amount, EA_8BYTE))
{
// The following probes the page and allocates the local heap.
// ldp tmpReg, xzr, [sp], #-amount
// Note that we cannot use ldp xzr, xzr since
// the behaviour of ldp where the two destination registers are the same is unpredictable.
const regNumber tmpReg = targetReg;
GetEmitter()->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, REG_ZR, REG_SPBASE, -(ssize_t)amount,
INS_OPTS_POST_INDEX);
}
else
{
// ldr wzr, [sp]
// sub, sp, #amount
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, 0);
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, amount, rsGetRsvdReg());
}
lastTouchDelta = amount;
goto ALLOC_DONE;
}
// else, "mov regCnt, amount"
// If compInitMem=true, we can reuse targetReg as regcnt.
// Since size is a constant, regCnt is not yet initialized.
assert(regCnt == REG_NA);
if (compiler->info.compInitMem)
{
assert(tree->AvailableTempRegCount() == 0);
regCnt = targetReg;
}
else
{
regCnt = tree->ExtractTempReg();
}
instGen_Set_Reg_To_Imm(((unsigned int)amount == amount) ? EA_4BYTE : EA_8BYTE, regCnt, amount);
}
if (compiler->info.compInitMem)
{
BasicBlock* loop = genCreateTempLabel();
// At this point 'regCnt' is set to the total number of bytes to locAlloc.
// Since we have to zero out the allocated memory AND ensure that the stack pointer is always valid
// by tickling the pages, we will just push 0's on the stack.
//
// Note: regCnt is guaranteed to be even on Amd64 since STACK_ALIGN/TARGET_POINTER_SIZE = 2
// and localloc size is a multiple of STACK_ALIGN.
// Loop:
genDefineTempLabel(loop);
// We can use pre-indexed addressing.
// stp ZR, ZR, [SP, #-16]!
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_ZR, REG_ZR, REG_SPBASE, -16, INS_OPTS_PRE_INDEX);
// If not done, loop
// Note that regCnt is the number of bytes to stack allocate.
// Therefore we need to subtract 16 from regcnt here.
assert(genIsValidIntReg(regCnt));
inst_RV_IV(INS_subs, regCnt, 16, emitActualTypeSize(type));
inst_JMP(EJ_ne, loop);
lastTouchDelta = 0;
}
else
{
// At this point 'regCnt' is set to the total number of bytes to localloc.
//
// We don't need to zero out the allocated memory. However, we do have
// to tickle the pages to ensure that SP is always valid and is
// in sync with the "stack guard page". Note that in the worst
// case SP is on the last byte of the guard page. Thus you must
// touch SP-0 first not SP-0x1000.
//
// This is similar to the prolog code in CodeGen::genAllocLclFrame().
//
// Note that we go through a few hoops so that SP never points to
// illegal pages at any time during the tickling process.
//
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
// bvc Loop // result is smaller than original SP (no wrap around)
// mov regCnt, #0 // Overflow, pick lowest possible value
//
// Loop:
// ldr wzr, [SP + 0] // tickle the page - read from the page
// sub regTmp, SP, PAGE_SIZE // decrement SP by eeGetPageSize()
// cmp regTmp, regCnt
// jb Done
// mov SP, regTmp
// j Loop
//
// Done:
// mov SP, regCnt
//
// Setup the regTmp
regNumber regTmp = tree->GetSingleTempReg();
BasicBlock* loop = genCreateTempLabel();
BasicBlock* done = genCreateTempLabel();
// subs regCnt, SP, regCnt // regCnt now holds ultimate SP
GetEmitter()->emitIns_R_R_R(INS_subs, EA_PTRSIZE, regCnt, REG_SPBASE, regCnt);
inst_JMP(EJ_vc, loop); // branch if the V flag is not set
// Overflow, set regCnt to lowest possible value
instGen_Set_Reg_To_Zero(EA_PTRSIZE, regCnt);
genDefineTempLabel(loop);
// tickle the page - Read from the updated SP - this triggers a page fault when on the guard page
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, 0);
// decrement SP by eeGetPageSize()
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, regTmp, REG_SPBASE, compiler->eeGetPageSize());
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regTmp, regCnt);
inst_JMP(EJ_lo, done);
// Update SP to be at the next page of stack that we will tickle
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regTmp, /* canSkip */ false);
// Jump to loop and tickle new stack address
inst_JMP(EJ_jmp, loop);
// Done with stack tickle loop
genDefineTempLabel(done);
// Now just move the final value to SP
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_SPBASE, regCnt, /* canSkip */ false);
// lastTouchDelta is dynamic, and can be up to a page. So if we have outgoing arg space,
// we're going to assume the worst and probe.
}
ALLOC_DONE:
// Re-adjust SP to allocate outgoing arg area. We must probe this adjustment.
if (stackAdjustment != 0)
{
assert((stackAdjustment % STACK_ALIGN) == 0); // This must be true for the stack to remain aligned
assert((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) || (lastTouchDelta >= 0));
const regNumber tmpReg = rsGetRsvdReg();
if ((lastTouchDelta == ILLEGAL_LAST_TOUCH_DELTA) ||
(stackAdjustment + (unsigned)lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES >
compiler->eeGetPageSize()))
{
genStackPointerConstantAdjustmentLoopWithProbe(-(ssize_t)stackAdjustment, tmpReg);
}
else
{
genStackPointerConstantAdjustment(-(ssize_t)stackAdjustment, tmpReg);
}
// Return the stackalloc'ed address in result register.
// TargetReg = SP + stackAdjustment.
//
genInstrWithConstant(INS_add, EA_PTRSIZE, targetReg, REG_SPBASE, (ssize_t)stackAdjustment, tmpReg);
}
else // stackAdjustment == 0
{
// Move the final value of SP to targetReg
inst_Mov(TYP_I_IMPL, targetReg, REG_SPBASE, /* canSkip */ false);
}
BAILOUT:
if (endLabel != nullptr)
genDefineTempLabel(endLabel);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForNegNot: Produce code for a GT_NEG/GT_NOT node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForNegNot(GenTree* tree)
{
assert(tree->OperIs(GT_NEG, GT_NOT));
var_types targetType = tree->TypeGet();
assert(!tree->OperIs(GT_NOT) || !varTypeIsFloating(targetType));
regNumber targetReg = tree->GetRegNum();
instruction ins = genGetInsForOper(tree->OperGet(), targetType);
if ((tree->gtFlags & GTF_SET_FLAGS) != 0)
{
switch (tree->OperGet())
{
case GT_NEG:
ins = INS_negs;
break;
default:
noway_assert(!"Unexpected UnaryOp with GTF_SET_FLAGS set");
}
}
// The arithmetic node must be sitting in a register (since it's not contained)
assert(!tree->isContained());
// The dst can only be a register.
assert(targetReg != REG_NA);
GenTree* operand = tree->gtGetOp1();
assert(!operand->isContained());
// The src must be a register.
regNumber operandReg = genConsumeReg(operand);
GetEmitter()->emitIns_R_R(ins, emitActualTypeSize(tree), targetReg, operandReg);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBswap: Produce code for a GT_BSWAP / GT_BSWAP16 node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForBswap(GenTree* tree)
{
assert(tree->OperIs(GT_BSWAP, GT_BSWAP16));
regNumber targetReg = tree->GetRegNum();
var_types targetType = tree->TypeGet();
GenTree* operand = tree->gtGetOp1();
assert(operand->isUsedFromReg());
regNumber operandReg = genConsumeReg(operand);
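// rev16 reverses the byte order within each 16-bit halfword; rev reverses all bytes of the
// register width given by targetType.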
if (tree->OperIs(GT_BSWAP16))
{
inst_RV_RV(INS_rev16, targetReg, operandReg, targetType);
}
else
{
inst_RV_RV(INS_rev, targetReg, operandReg, targetType);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForDivMod: Produce code for a GT_DIV/GT_UDIV node. We don't see MOD:
// (1) integer MOD is morphed into a sequence of sub, mul, div in fgMorph;
// (2) float/double MOD is morphed into a helper call by front-end.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForDivMod(GenTreeOp* tree)
{
assert(tree->OperIs(GT_DIV, GT_UDIV));
var_types targetType = tree->TypeGet();
emitter* emit = GetEmitter();
genConsumeOperands(tree);
if (varTypeIsFloating(targetType))
{
// Floating point divide never raises an exception
genCodeForBinary(tree);
}
else // an integer divide operation
{
GenTree* divisorOp = tree->gtGetOp2();
emitAttr size = EA_ATTR(genTypeSize(genActualType(tree->TypeGet())));
if (divisorOp->IsIntegralConst(0))
{
// We unconditionally throw a divide by zero exception
genJumpToThrowHlpBlk(EJ_jmp, SCK_DIV_BY_ZERO);
// We still need to call genProduceReg
genProduceReg(tree);
}
else // the divisor is not the constant zero
{
regNumber divisorReg = divisorOp->GetRegNum();
// Generate the required runtime checks for GT_DIV or GT_UDIV
if (tree->gtOper == GT_DIV)
{
BasicBlock* sdivLabel = genCreateTempLabel();
// Two possible exceptions:
// (AnyVal / 0) => DivideByZeroException
// (MinInt / -1) => ArithmeticException
//
bool checkDividend = true;
// Do we have an immediate for the 'divisorOp'?
//
if (divisorOp->IsCnsIntOrI())
{
GenTreeIntConCommon* intConstTree = divisorOp->AsIntConCommon();
ssize_t intConstValue = intConstTree->IconValue();
assert(intConstValue != 0); // already checked above by IsIntegralConst(0)
if (intConstValue != -1)
{
checkDividend = false; // We statically know that the dividend is not -1
}
}
else // insert check for division by zero
{
// If the divisor is zero, throw a DivideByZeroException
emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
genJumpToThrowHlpBlk(EJ_eq, SCK_DIV_BY_ZERO);
}
if (checkDividend)
{
// If the divisor is not -1, branch to 'sdivLabel'
emit->emitIns_R_I(INS_cmp, size, divisorReg, -1);
inst_JMP(EJ_ne, sdivLabel);
// If control flow continues past here the 'divisorReg' is known to be -1
regNumber dividendReg = tree->gtGetOp1()->GetRegNum();
// At this point the divisor is known to be -1
//
// Issue the 'adds zr, dividendReg, dividendReg' instruction
// this will set both the Z and V flags only when dividendReg is MinInt
//
emit->emitIns_R_R_R(INS_adds, size, REG_ZR, dividendReg, dividendReg);
inst_JMP(EJ_ne, sdivLabel); // goto sdiv if the Z flag is clear
genJumpToThrowHlpBlk(EJ_vs, SCK_ARITH_EXCPN); // if the V flag is set, throw
// ArithmeticException
genDefineTempLabel(sdivLabel);
}
genCodeForBinary(tree); // Generate the sdiv instruction
}
else // (tree->gtOper == GT_UDIV)
{
// Only one possible exception
// (AnyVal / 0) => DivideByZeroException
//
// Note that division by the constant 0 was already checked for above by the
// op2->IsIntegralConst(0) check
//
if (!divisorOp->IsCnsIntOrI())
{
// divisorOp is not a constant, so it could be zero
//
emit->emitIns_R_I(INS_cmp, size, divisorReg, 0);
genJumpToThrowHlpBlk(EJ_eq, SCK_DIV_BY_ZERO);
}
genCodeForBinary(tree);
}
}
}
}
// Generate code for CpObj nodes which copy structs that have interleaved
// GC pointers.
// For this case we'll generate a sequence of loads/stores in the case of struct
// slots that don't contain GC pointers. The generated code will look like:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
//
// In the case of a GC-Pointer we'll call the ByRef write barrier helper
// who happens to use the same registers as the previous call to maintain
// the same register requirements and register killsets:
// bl CORINFO_HELP_ASSIGN_BYREF
//
// So finally an example would look like this:
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
// bl CORINFO_HELP_ASSIGN_BYREF
// ldr tempReg, [R13, #8]
// str tempReg, [R14, #8]
void CodeGen::genCodeForCpObj(GenTreeObj* cpObjNode)
{
GenTree* dstAddr = cpObjNode->Addr();
GenTree* source = cpObjNode->Data();
var_types srcAddrType = TYP_BYREF;
bool sourceIsLocal = false;
assert(source->isContained());
if (source->gtOper == GT_IND)
{
GenTree* srcAddr = source->gtGetOp1();
assert(!srcAddr->isContained());
srcAddrType = srcAddr->TypeGet();
}
else
{
noway_assert(source->IsLocal());
sourceIsLocal = true;
}
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
assert(!dstAddr->isContained());
// This GenTree node has data about GC pointers, which means we're dealing
// with CpObj.
assert(cpObjNode->GetLayout()->HasGCPtr());
#endif // DEBUG
// Consume the operands and get them into the right registers.
// They may now contain gc pointers (depending on their type; gcMarkRegPtrVal will "do the right thing").
genConsumeBlockOp(cpObjNode, REG_WRITE_BARRIER_DST_BYREF, REG_WRITE_BARRIER_SRC_BYREF, REG_NA);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_SRC_BYREF, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_DST_BYREF, dstAddr->TypeGet());
ClassLayout* layout = cpObjNode->GetLayout();
unsigned slots = layout->GetSlotCount();
// Temp register(s) used to perform the sequence of loads and stores.
regNumber tmpReg = cpObjNode->ExtractTempReg();
regNumber tmpReg2 = REG_NA;
assert(genIsValidIntReg(tmpReg));
assert(tmpReg != REG_WRITE_BARRIER_SRC_BYREF);
assert(tmpReg != REG_WRITE_BARRIER_DST_BYREF);
if (slots > 1)
{
tmpReg2 = cpObjNode->GetSingleTempReg();
assert(tmpReg2 != tmpReg);
assert(genIsValidIntReg(tmpReg2));
assert(tmpReg2 != REG_WRITE_BARRIER_DST_BYREF);
assert(tmpReg2 != REG_WRITE_BARRIER_SRC_BYREF);
}
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile CpObj operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
unsigned i = 0;
// While two or more slots remain, use a ldp/stp sequence
while (i < slots - 1)
{
emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0));
emitAttr attr1 = emitTypeSize(layout->GetGCPtrType(i + 1));
emit->emitIns_R_R_R_I(INS_ldp, attr0, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF, 2 * TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX, attr1);
emit->emitIns_R_R_R_I(INS_stp, attr0, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF, 2 * TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX, attr1);
i += 2;
}
// Use a ldr/str sequence for the last remainder
if (i < slots)
{
emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0));
emit->emitIns_R_R_I(INS_ldr, attr0, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
emit->emitIns_R_R_I(INS_str, attr0, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
}
}
else
{
unsigned gcPtrCount = cpObjNode->GetLayout()->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
if (!layout->IsGCPtr(i))
{
// Check if the next slot's type is also TYP_GC_NONE and use ldp/stp
if ((i + 1 < slots) && !layout->IsGCPtr(i + 1))
{
emit->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF,
2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
emit->emitIns_R_R_R_I(INS_stp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF,
2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
++i; // extra increment of i, since we are copying two items
}
else
{
emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
}
}
else
{
// In the case of a GC-Pointer we'll call the ByRef write barrier helper
genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
gcPtrCount--;
}
++i;
}
assert(gcPtrCount == 0);
}
if (cpObjNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a load barrier after a volatile CpObj operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
// Clear the gcInfo for REG_WRITE_BARRIER_SRC_BYREF and REG_WRITE_BARRIER_DST_BYREF.
// While we normally update GC info prior to the last instruction that uses them,
// these actually live into the helper call.
gcInfo.gcMarkRegSetNpt(RBM_WRITE_BARRIER_SRC_BYREF | RBM_WRITE_BARRIER_DST_BYREF);
}
// generate code to do a switch statement based on a table of ip-relative offsets
void CodeGen::genTableBasedSwitch(GenTree* treeNode)
{
genConsumeOperands(treeNode->AsOp());
regNumber idxReg = treeNode->AsOp()->gtOp1->GetRegNum();
regNumber baseReg = treeNode->AsOp()->gtOp2->GetRegNum();
regNumber tmpReg = treeNode->GetSingleTempReg();
// load the ip-relative offset (which is relative to start of fgFirstBB)
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, baseReg, baseReg, idxReg, INS_OPTS_LSL);
// add it to the absolute address of fgFirstBB
GetEmitter()->emitIns_R_L(INS_adr, EA_PTRSIZE, compiler->fgFirstBB, tmpReg);
GetEmitter()->emitIns_R_R_R(INS_add, EA_PTRSIZE, baseReg, baseReg, tmpReg);
// br baseReg
GetEmitter()->emitIns_R(INS_br, emitActualTypeSize(TYP_I_IMPL), baseReg);
}
// emits the table and an instruction to get the address of the first element
void CodeGen::genJumpTable(GenTree* treeNode)
{
noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH);
assert(treeNode->OperGet() == GT_JMPTABLE);
unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount;
BasicBlock** jumpTable = compiler->compCurBB->bbJumpSwt->bbsDstTab;
unsigned jmpTabOffs;
unsigned jmpTabBase;
jmpTabBase = GetEmitter()->emitBBTableDataGenBeg(jumpCount, true);
jmpTabOffs = 0;
JITDUMP("\n J_M%03u_DS%02u LABEL DWORD\n", compiler->compMethodID, jmpTabBase);
for (unsigned i = 0; i < jumpCount; i++)
{
BasicBlock* target = *jumpTable++;
noway_assert(target->bbFlags & BBF_HAS_LABEL);
JITDUMP(" DD L_M%03u_" FMT_BB "\n", compiler->compMethodID, target->bbNum);
GetEmitter()->emitDataGenData(i, target);
};
GetEmitter()->emitDataGenEnd();
// Access to inline data is 'abstracted' by a special type of static member
// (produced by eeFindJitDataOffs) which the emitter recognizes as being a reference
// to constant data, not a real static field.
GetEmitter()->emitIns_R_C(INS_adr, emitActualTypeSize(TYP_I_IMPL), treeNode->GetRegNum(), REG_NA,
compiler->eeFindJitDataOffs(jmpTabBase), 0);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD, GT_XAND, GT_XORR or GT_XCHG node.
//
// Arguments:
// treeNode - the GT_XADD/XAND/XORR/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
{
GenTree* data = treeNode->AsOp()->gtOp2;
GenTree* addr = treeNode->AsOp()->gtOp1;
regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();
genConsumeAddress(addr);
genConsumeRegs(data);
emitAttr dataSize = emitActualTypeSize(data);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
assert(!data->isContainedIntOrIImmed());
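// With ARMv8.1 LSE atomics the read-modify-write is a single instruction; when the result is
// not needed (targetReg == REG_NA), ZR is used as the destination register.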
switch (treeNode->gtOper)
{
case GT_XORR:
GetEmitter()->emitIns_R_R_R(INS_ldsetal, dataSize, dataReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
case GT_XAND:
{
// Grab a temp reg to perform `MVN` for dataReg first.
regNumber tempReg = treeNode->GetSingleTempReg();
GetEmitter()->emitIns_R_R(INS_mvn, dataSize, tempReg, dataReg);
GetEmitter()->emitIns_R_R_R(INS_ldclral, dataSize, tempReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
}
case GT_XCHG:
GetEmitter()->emitIns_R_R_R(INS_swpal, dataSize, dataReg, targetReg, addrReg);
break;
case GT_XADD:
GetEmitter()->emitIns_R_R_R(INS_ldaddal, dataSize, dataReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
default:
assert(!"Unexpected treeNode->gtOper");
}
}
else
{
// These are imported normally if Atomics aren't supported.
assert(!treeNode->OperIs(GT_XORR, GT_XAND));
regNumber exResultReg = treeNode->ExtractTempReg(RBM_ALLINT);
regNumber storeDataReg = (treeNode->OperGet() == GT_XCHG) ? dataReg : treeNode->ExtractTempReg(RBM_ALLINT);
regNumber loadReg = (targetReg != REG_NA) ? targetReg : storeDataReg;
// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);
noway_assert(addrReg != loadReg);
noway_assert(dataReg != loadReg);
noway_assert(addrReg != storeDataReg);
noway_assert((treeNode->OperGet() == GT_XCHG) || (addrReg != dataReg));
assert(addr->isUsedFromReg());
noway_assert(exResultReg != REG_NA);
noway_assert(exResultReg != targetReg);
noway_assert((targetReg != REG_NA) || (treeNode->OperGet() != GT_XCHG));
// Store exclusive unpredictable cases must be avoided
noway_assert(exResultReg != storeDataReg);
noway_assert(exResultReg != addrReg);
// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
// registers
// die at the first instruction generated by the node. This is not the case for these atomics as the input
// registers are multiply-used. As such, we need to mark the addr register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());
// Emit code like this:
// retry:
// ldxr loadReg, [addrReg]
// add storeDataReg, loadReg, dataReg # Only for GT_XADD
// # GT_XCHG storeDataReg === dataReg
// stxr exResult, storeDataReg, [addrReg]
// cbnz exResult, retry
// dmb ish
BasicBlock* labelRetry = genCreateTempLabel();
genDefineTempLabel(labelRetry);
// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(INS_ldaxr, dataSize, loadReg, addrReg);
switch (treeNode->OperGet())
{
case GT_XADD:
if (data->isContainedIntOrIImmed())
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
genInstrWithConstant(INS_add, dataSize, storeDataReg, loadReg, data->AsIntConCommon()->IconValue(),
REG_NA);
}
else
{
GetEmitter()->emitIns_R_R_R(INS_add, dataSize, storeDataReg, loadReg, dataReg);
}
break;
case GT_XCHG:
assert(!data->isContained());
storeDataReg = dataReg;
break;
default:
unreached();
}
// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(INS_stlxr, dataSize, exResultReg, storeDataReg, addrReg);
GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
instGen_MemoryBarrier();
gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());
}
if (treeNode->GetRegNum() != REG_NA)
{
genProduceReg(treeNode);
}
}
//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* treeNode)
{
assert(treeNode->OperIs(GT_CMPXCHG));
GenTree* addr = treeNode->gtOpLocation; // arg1
GenTree* data = treeNode->gtOpValue; // arg2
GenTree* comparand = treeNode->gtOpComparand; // arg3
regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();
regNumber comparandReg = comparand->GetRegNum();
genConsumeAddress(addr);
genConsumeRegs(data);
genConsumeRegs(comparand);
if (compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
emitAttr dataSize = emitActualTypeSize(data);
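// 'casal' compares the value at [addrReg] with targetReg and, if equal, stores dataReg;
// the original memory value is loaded back into targetReg either way.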
// casal uses the comparand as the target reg
GetEmitter()->emitIns_Mov(INS_mov, dataSize, targetReg, comparandReg, /* canSkip */ true);
// Catch case we destroyed data or address before use
noway_assert((addrReg != targetReg) || (targetReg == comparandReg));
noway_assert((dataReg != targetReg) || (targetReg == comparandReg));
GetEmitter()->emitIns_R_R_R(INS_casal, dataSize, targetReg, dataReg, addrReg);
}
else
{
regNumber exResultReg = treeNode->ExtractTempReg(RBM_ALLINT);
// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);
noway_assert(dataReg != targetReg);
noway_assert(comparandReg != targetReg);
noway_assert(addrReg != dataReg);
noway_assert(targetReg != REG_NA);
noway_assert(exResultReg != REG_NA);
noway_assert(exResultReg != targetReg);
assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert(!comparand->isUsedFromMemory());
// Store exclusive unpredictable cases must be avoided
noway_assert(exResultReg != dataReg);
noway_assert(exResultReg != addrReg);
// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the input
// registers
// die at the first instruction generated by the node. This is not the case for these atomics as the input
// registers are multiply-used. As such, we need to mark the addr register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());
// TODO-ARM64-CQ Use ARMv8.1 atomics if available
// https://github.com/dotnet/runtime/issues/8225
// Emit code like this:
// retry:
// ldxr targetReg, [addrReg]
// cmp targetReg, comparandReg
// bne compareFail
// stxr exResult, dataReg, [addrReg]
// cbnz exResult, retry
// compareFail:
// dmb ish
BasicBlock* labelRetry = genCreateTempLabel();
BasicBlock* labelCompareFail = genCreateTempLabel();
genDefineTempLabel(labelRetry);
// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(INS_ldaxr, emitTypeSize(treeNode), targetReg, addrReg);
if (comparand->isContainedIntOrIImmed())
{
if (comparand->IsIntegralConst(0))
{
GetEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(treeNode), labelCompareFail, targetReg);
}
else
{
GetEmitter()->emitIns_R_I(INS_cmp, emitActualTypeSize(treeNode), targetReg,
comparand->AsIntConCommon()->IconValue());
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
}
else
{
GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(treeNode), targetReg, comparandReg);
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(INS_stlxr, emitTypeSize(treeNode), exResultReg, dataReg, addrReg);
GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);
genDefineTempLabel(labelCompareFail);
instGen_MemoryBarrier();
gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());
}
genProduceReg(treeNode);
}
instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins = INS_BREAKPOINT;
if (varTypeIsFloating(type))
{
switch (oper)
{
case GT_ADD:
ins = INS_fadd;
break;
case GT_SUB:
ins = INS_fsub;
break;
case GT_MUL:
ins = INS_fmul;
break;
case GT_DIV:
ins = INS_fdiv;
break;
case GT_NEG:
ins = INS_fneg;
break;
default:
NYI("Unhandled oper in genGetInsForOper() - float");
unreached();
break;
}
}
else
{
switch (oper)
{
case GT_ADD:
ins = INS_add;
break;
case GT_AND:
ins = INS_and;
break;
case GT_AND_NOT:
ins = INS_bic;
break;
case GT_DIV:
ins = INS_sdiv;
break;
case GT_UDIV:
ins = INS_udiv;
break;
case GT_MUL:
ins = INS_mul;
break;
case GT_LSH:
ins = INS_lsl;
break;
case GT_NEG:
ins = INS_neg;
break;
case GT_NOT:
ins = INS_mvn;
break;
case GT_OR:
ins = INS_orr;
break;
case GT_ROR:
ins = INS_ror;
break;
case GT_RSH:
ins = INS_asr;
break;
case GT_RSZ:
ins = INS_lsr;
break;
case GT_SUB:
ins = INS_sub;
break;
case GT_XOR:
ins = INS_eor;
break;
default:
NYI("Unhandled oper in genGetInsForOper() - integer");
unreached();
break;
}
}
return ins;
}
//------------------------------------------------------------------------
// genCodeForReturnTrap: Produce code for a GT_RETURNTRAP node.
//
// Arguments:
// tree - the GT_RETURNTRAP node
//
void CodeGen::genCodeForReturnTrap(GenTreeOp* tree)
{
assert(tree->OperGet() == GT_RETURNTRAP);
// this is nothing but a conditional call to CORINFO_HELP_STOP_FOR_GC
// based on the contents of 'data'
GenTree* data = tree->gtOp1;
genConsumeRegs(data);
GetEmitter()->emitIns_R_I(INS_cmp, EA_4BYTE, data->GetRegNum(), 0);
BasicBlock* skipLabel = genCreateTempLabel();
inst_JMP(EJ_eq, skipLabel);
// emit the call to the EE-helper that stops for GC (or other reasons)
genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
genDefineTempLabel(skipLabel);
}
//------------------------------------------------------------------------
// genCodeForStoreInd: Produce code for a GT_STOREIND node.
//
// Arguments:
// tree - the GT_STOREIND node
//
void CodeGen::genCodeForStoreInd(GenTreeStoreInd* tree)
{
#ifdef FEATURE_SIMD
// Storing Vector3 of size 12 bytes through indirection
if (tree->TypeGet() == TYP_SIMD12)
{
genStoreIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
GenTree* data = tree->Data();
GenTree* addr = tree->Addr();
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(tree, data);
if (writeBarrierForm != GCInfo::WBF_NoBarrier)
{
// data and addr must be in registers.
// Consume both registers so that any copies of interfering
// registers are taken care of.
genConsumeOperands(tree);
// At this point, we should not have any interference.
// That is, 'data' must not be in REG_WRITE_BARRIER_DST_BYREF,
// as that is where 'addr' must go.
noway_assert(data->GetRegNum() != REG_WRITE_BARRIER_DST_BYREF);
// 'addr' goes into x14 (REG_WRITE_BARRIER_DST)
genCopyRegIfNeeded(addr, REG_WRITE_BARRIER_DST);
// 'data' goes into x15 (REG_WRITE_BARRIER_SRC)
genCopyRegIfNeeded(data, REG_WRITE_BARRIER_SRC);
genGCWriteBarrier(tree, writeBarrierForm);
}
else // A normal store, not a WriteBarrier store
{
// We must consume the operands in the proper execution order,
// so that liveness is updated appropriately.
genConsumeAddress(addr);
if (!data->isContained())
{
genConsumeRegs(data);
}
regNumber dataReg;
if (data->isContainedIntOrIImmed())
{
assert(data->IsIntegralConst(0));
dataReg = REG_ZR;
}
else // data is not contained, so evaluate it into a register
{
assert(!data->isContained());
dataReg = data->GetRegNum();
}
var_types type = tree->TypeGet();
instruction ins = ins_StoreFromSrc(dataReg, type);
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
bool addrIsInReg = addr->isUsedFromReg();
bool addrIsAligned = ((tree->gtFlags & GTF_IND_UNALIGNED) == 0);
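// Prefer a store-release (stlrb/stlrh/stlr) over a full memory barrier followed by a plain
// store when the address is in a register (and, for str/strh, suitably aligned).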
if ((ins == INS_strb) && addrIsInReg)
{
ins = INS_stlrb;
}
else if ((ins == INS_strh) && addrIsInReg && addrIsAligned)
{
ins = INS_stlrh;
}
else if ((ins == INS_str) && genIsValidIntReg(dataReg) && addrIsInReg && addrIsAligned)
{
ins = INS_stlr;
}
else
{
// issue a full memory barrier before a volatile StInd
// Note: We cannot issue store barrier ishst because it is a weaker barrier.
// The loads can get reordered around the barrier, causing the wrong value to be read.
instGen_MemoryBarrier();
}
}
GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), dataReg, tree);
// If store was to a variable, update variable liveness after instruction was emitted.
genUpdateLife(tree);
}
}
//------------------------------------------------------------------------
// genCodeForSwap: Produce code for a GT_SWAP node.
//
// Arguments:
// tree - the GT_SWAP node
//
void CodeGen::genCodeForSwap(GenTreeOp* tree)
{
assert(tree->OperIs(GT_SWAP));
// Swap is only supported for lclVar operands that are enregistered
// We do not consume or produce any registers. Both operands remain enregistered.
// However, the gc-ness may change.
assert(genIsRegCandidateLocal(tree->gtOp1) && genIsRegCandidateLocal(tree->gtOp2));
GenTreeLclVarCommon* lcl1 = tree->gtOp1->AsLclVarCommon();
LclVarDsc* varDsc1 = compiler->lvaGetDesc(lcl1);
var_types type1 = varDsc1->TypeGet();
GenTreeLclVarCommon* lcl2 = tree->gtOp2->AsLclVarCommon();
LclVarDsc* varDsc2 = compiler->lvaGetDesc(lcl2);
var_types type2 = varDsc2->TypeGet();
// We must have both int or both fp regs
assert(!varTypeIsFloating(type1) || varTypeIsFloating(type2));
// FP swap is not yet implemented (and should have NYI'd in LSRA)
assert(!varTypeIsFloating(type1));
regNumber oldOp1Reg = lcl1->GetRegNum();
regMaskTP oldOp1RegMask = genRegMask(oldOp1Reg);
regNumber oldOp2Reg = lcl2->GetRegNum();
regMaskTP oldOp2RegMask = genRegMask(oldOp2Reg);
// We don't call genUpdateVarReg because we don't have a tree node with the new register.
varDsc1->SetRegNum(oldOp2Reg);
varDsc2->SetRegNum(oldOp1Reg);
// Do the xchg
emitAttr size = EA_PTRSIZE;
if (varTypeGCtype(type1) != varTypeGCtype(type2))
{
// If the type specified to the emitter is a GC type, it will swap the GC-ness of the registers.
// Otherwise it will leave them alone, which is correct if they have the same GC-ness.
size = EA_GCREF;
}
NYI("register swap");
// inst_RV_RV(INS_xchg, oldOp1Reg, oldOp2Reg, TYP_I_IMPL, size);
// Update the gcInfo.
// Manually remove these regs for the gc sets (mostly to avoid confusing duplicative dump output)
gcInfo.gcRegByrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
gcInfo.gcRegGCrefSetCur &= ~(oldOp1RegMask | oldOp2RegMask);
// gcMarkRegPtrVal will do the appropriate thing for non-gc types.
// It will also dump the updates.
gcInfo.gcMarkRegPtrVal(oldOp2Reg, type1);
gcInfo.gcMarkRegPtrVal(oldOp1Reg, type2);
}
//------------------------------------------------------------------------
// genIntToFloatCast: Generate code to cast an int/long to float/double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType= int32/uint32/int64/uint64 and DstType=float/double.
//
void CodeGen::genIntToFloatCast(GenTree* treeNode)
{
// int type --> float/double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidIntReg(op1->GetRegNum())); // Must be a valid int reg.
var_types dstType = treeNode->CastToType();
var_types srcType = genActualType(op1->TypeGet());
assert(!varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
// force the srcType to unsigned if GT_UNSIGNED flag is set
if (treeNode->gtFlags & GTF_UNSIGNED)
{
srcType = varTypeToUnsigned(srcType);
}
// We should never see a srcType whose size is neither EA_4BYTE nor EA_8BYTE
emitAttr srcSize = EA_ATTR(genTypeSize(srcType));
noway_assert((srcSize == EA_4BYTE) || (srcSize == EA_8BYTE));
instruction ins = varTypeIsUnsigned(srcType) ? INS_ucvtf : INS_scvtf;
insOpts cvtOption = INS_OPTS_NONE; // invalid value
if (dstType == TYP_DOUBLE)
{
if (srcSize == EA_4BYTE)
{
cvtOption = INS_OPTS_4BYTE_TO_D;
}
else
{
assert(srcSize == EA_8BYTE);
cvtOption = INS_OPTS_8BYTE_TO_D;
}
}
else
{
assert(dstType == TYP_FLOAT);
if (srcSize == EA_4BYTE)
{
cvtOption = INS_OPTS_4BYTE_TO_S;
}
else
{
assert(srcSize == EA_8BYTE);
cvtOption = INS_OPTS_8BYTE_TO_S;
}
}
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(ins, emitActualTypeSize(dstType), treeNode->GetRegNum(), op1->GetRegNum(), cvtOption);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genFloatToIntCast: Generate code to cast float/double to int/long
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// SrcType=float/double and DstType= int32/uint32/int64/uint64
//
void CodeGen::genFloatToIntCast(GenTree* treeNode)
{
// we don't expect to see overflow detecting float/double --> int type conversions here
// as they should have been converted into helper calls by front-end.
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidIntReg(targetReg)); // Must be a valid int reg.
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && !varTypeIsFloating(dstType));
// We should never see a dstType whose size is neither EA_4BYTE nor EA_8BYTE
// For conversions to small types (byte/sbyte/int16/uint16) from float/double,
// we expect the front-end or lowering phase to have generated two levels of cast.
//
emitAttr dstSize = EA_ATTR(genTypeSize(dstType));
noway_assert((dstSize == EA_4BYTE) || (dstSize == EA_8BYTE));
instruction ins = INS_fcvtzs; // default to sign converts
insOpts cvtOption = INS_OPTS_NONE; // invalid value
if (varTypeIsUnsigned(dstType))
{
ins = INS_fcvtzu; // use unsigned converts
}
if (srcType == TYP_DOUBLE)
{
if (dstSize == EA_4BYTE)
{
cvtOption = INS_OPTS_D_TO_4BYTE;
}
else
{
assert(dstSize == EA_8BYTE);
cvtOption = INS_OPTS_D_TO_8BYTE;
}
}
else
{
assert(srcType == TYP_FLOAT);
if (dstSize == EA_4BYTE)
{
cvtOption = INS_OPTS_S_TO_4BYTE;
}
else
{
assert(dstSize == EA_8BYTE);
cvtOption = INS_OPTS_S_TO_8BYTE;
}
}
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R(ins, dstSize, treeNode->GetRegNum(), op1->GetRegNum(), cvtOption);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCkfinite: Generate code for ckfinite opcode.
//
// Arguments:
// treeNode - The GT_CKFINITE node
//
// Return Value:
// None.
//
// Assumptions:
// GT_CKFINITE node has reserved an internal register.
//
void CodeGen::genCkfinite(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_CKFINITE);
GenTree* op1 = treeNode->AsOp()->gtOp1;
var_types targetType = treeNode->TypeGet();
int expMask = (targetType == TYP_FLOAT) ? 0x7F8 : 0x7FF; // Bit mask to extract exponent.
int shiftAmount = targetType == TYP_FLOAT ? 20 : 52;
emitter* emit = GetEmitter();
// Extract exponent into a register.
regNumber intReg = treeNode->GetSingleTempReg();
regNumber fpReg = genConsumeReg(op1);
inst_Mov(targetType, intReg, fpReg, /* canSkip */ false, emitActualTypeSize(treeNode));
emit->emitIns_R_R_I(INS_lsr, emitActualTypeSize(targetType), intReg, intReg, shiftAmount);
// Mask the exponent with all 1's and check whether it is all 1's (i.e. NaN or infinity)
emit->emitIns_R_R_I(INS_and, EA_4BYTE, intReg, intReg, expMask);
emit->emitIns_R_I(INS_cmp, EA_4BYTE, intReg, expMask);
// If exponent is all 1's, throw ArithmeticException
genJumpToThrowHlpBlk(EJ_eq, SCK_ARITH_EXCPN);
// If it is a finite value, copy it to targetReg
inst_Mov(targetType, treeNode->GetRegNum(), fpReg, /* canSkip */ true);
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCodeForCompare: Produce code for a GT_EQ/GT_NE/GT_LT/GT_LE/GT_GE/GT_GT/GT_TEST_EQ/GT_TEST_NE node.
//
// Arguments:
// tree - the node
//
void CodeGen::genCodeForCompare(GenTreeOp* tree)
{
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types op1Type = genActualType(op1->TypeGet());
var_types op2Type = genActualType(op2->TypeGet());
assert(!op1->isUsedFromMemory());
genConsumeOperands(tree);
emitAttr cmpSize = EA_ATTR(genTypeSize(op1Type));
assert(genTypeSize(op1Type) == genTypeSize(op2Type));
if (varTypeIsFloating(op1Type))
{
assert(varTypeIsFloating(op2Type));
assert(!op1->isContained());
assert(op1Type == op2Type);
if (op2->IsFPZero())
{
assert(op2->isContained());
emit->emitIns_R_F(INS_fcmp, cmpSize, op1->GetRegNum(), 0.0);
}
else
{
assert(!op2->isContained());
emit->emitIns_R_R(INS_fcmp, cmpSize, op1->GetRegNum(), op2->GetRegNum());
}
}
else
{
assert(!varTypeIsFloating(op2Type));
// We don't support swapping op1 and op2 to generate cmp reg, imm
assert(!op1->isContainedIntOrIImmed());
instruction ins = tree->OperIs(GT_TEST_EQ, GT_TEST_NE) ? INS_tst : INS_cmp;
if (op2->isContainedIntOrIImmed())
{
GenTreeIntConCommon* intConst = op2->AsIntConCommon();
emit->emitIns_R_I(ins, cmpSize, op1->GetRegNum(), intConst->IconValue());
}
else
{
emit->emitIns_R_R(ins, cmpSize, op1->GetRegNum(), op2->GetRegNum());
}
}
// Are we evaluating this into a register?
if (targetReg != REG_NA)
{
inst_SETCC(GenCondition::FromRelop(tree), tree->TypeGet(), targetReg);
genProduceReg(tree);
}
}
//------------------------------------------------------------------------
// genCodeForJumpCompare: Generates code for jmpCompare statement.
//
// A GT_JCMP node is created when a comparison and conditional branch
// can be executed in a single instruction.
//
// Arm64 has a few instructions with this behavior.
// - cbz/cbnz -- Compare and branch register zero/not zero
// - tbz/tbnz -- Test and branch register bit zero/not zero
//
// The cbz/cbnz supports the normal +/- 1MB branch range for conditional branches
// The tbz/tbnz supports a smaller +/- 32KB branch range
//
// A GT_JCMP cbz/cbnz node is created when there is a GT_EQ or GT_NE
// integer/unsigned comparison against #0 which is used by a GT_JTRUE
// condition jump node.
//
// A GT_JCMP tbz/tbnz node is created when there is a GT_TEST_EQ or GT_TEST_NE
// integer/unsigned comparison against a mask with a single bit set
// which is used by a GT_JTRUE condition jump node.
//
// This node is responsible for consuming the register, and emitting the
// appropriate fused compare/test and branch instruction
//
// Two flags guide code generation
// GTF_JCMP_TST -- Set if this is a tbz/tbnz rather than cbz/cbnz
// GTF_JCMP_EQ -- Set if this is cbz/tbz rather than cbnz/tbnz
//
// Arguments:
// tree - The GT_JCMP tree node.
//
// Return Value:
// None
//
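// For illustration only (register and label are placeholders), the fused forms
// emitted below look roughly like:
//   cbz  x0, <bbJumpDest>      ; GT_EQ against 0, GTF_JCMP_EQ set
//   tbnz x0, #3, <bbJumpDest>  ; GT_TEST_NE against (1 << 3), GTF_JCMP_EQ clear
//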
void CodeGen::genCodeForJumpCompare(GenTreeOp* tree)
{
assert(compiler->compCurBB->bbJumpKind == BBJ_COND);
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
assert(tree->OperIs(GT_JCMP));
assert(!varTypeIsFloating(tree));
assert(!op1->isUsedFromMemory());
assert(!op2->isUsedFromMemory());
assert(op2->IsCnsIntOrI());
assert(op2->isContained());
genConsumeOperands(tree);
regNumber reg = op1->GetRegNum();
emitAttr attr = emitActualTypeSize(op1->TypeGet());
if (tree->gtFlags & GTF_JCMP_TST)
{
ssize_t compareImm = op2->AsIntCon()->IconValue();
assert(isPow2(compareImm));
instruction ins = (tree->gtFlags & GTF_JCMP_EQ) ? INS_tbz : INS_tbnz;
int imm = genLog2((size_t)compareImm);
GetEmitter()->emitIns_J_R_I(ins, attr, compiler->compCurBB->bbJumpDest, reg, imm);
}
else
{
assert(op2->IsIntegralConst(0));
instruction ins = (tree->gtFlags & GTF_JCMP_EQ) ? INS_cbz : INS_cbnz;
GetEmitter()->emitIns_J_R(ins, attr, compiler->compCurBB->bbJumpDest, reg);
}
}
//---------------------------------------------------------------------
// genSPtoFPdelta - return offset from the stack pointer (Initial-SP) to the frame pointer. The frame pointer
// will point to the saved frame pointer slot (i.e., there will be frame pointer chaining).
//
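// For illustration (sizes are hypothetical): with the default frame shape and
// lvaOutgoingArgSpaceSize == 32, the saved FP/LR pair sits 32 bytes above the
// stack pointer, so the delta is 32.
//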
int CodeGenInterface::genSPtoFPdelta() const
{
assert(isFramePointerUsed());
int delta = -1; // initialization to illegal value
if (IsSaveFpLrWithAllCalleeSavedRegisters())
{
// The saved frame pointer is at the top of the frame, just beneath the saved varargs register space and the
// saved LR.
delta = genTotalFrameSize() - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 /* FP, LR */ * REGSIZE_BYTES;
}
else
{
// We place the saved frame pointer immediately above the outgoing argument space.
delta = (int)compiler->lvaOutgoingArgSpaceSize;
}
assert(delta >= 0);
return delta;
}
//---------------------------------------------------------------------
// genTotalFrameSize - return the total size of the stack frame, including local size,
// callee-saved register size, etc.
//
// Return value:
// Total frame size
//
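// For illustration (numbers are hypothetical): a varargs method that pushes 10
// callee-saved registers and has a 64-byte local frame yields
// 8 * REGSIZE_BYTES + 10 * REGSIZE_BYTES + 64 = 208 bytes (MAX_REG_ARG is 8 on Arm64).
//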
int CodeGenInterface::genTotalFrameSize() const
{
// For varargs functions, we home all the incoming register arguments. They are not
// included in the compCalleeRegsPushed count. This is like prespill on ARM32, but
// since we don't use "push" instructions to save them, we don't have to do the
// save of these varargs register arguments as the first thing in the prolog.
assert(!IsUninitialized(compiler->compCalleeRegsPushed));
int totalFrameSize = (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) +
compiler->compCalleeRegsPushed * REGSIZE_BYTES + compiler->compLclFrameSize;
assert(totalFrameSize >= 0);
return totalFrameSize;
}
//---------------------------------------------------------------------
// genCallerSPtoFPdelta - return the offset from Caller-SP to the frame pointer.
// This number is going to be negative, since the Caller-SP is at a higher
// address than the frame pointer.
//
// There must be a frame pointer to call this function!
int CodeGenInterface::genCallerSPtoFPdelta() const
{
assert(isFramePointerUsed());
int callerSPtoFPdelta;
callerSPtoFPdelta = genCallerSPtoInitialSPdelta() + genSPtoFPdelta();
assert(callerSPtoFPdelta <= 0);
return callerSPtoFPdelta;
}
//---------------------------------------------------------------------
// genCallerSPtoInitialSPdelta - return the offset from Caller-SP to Initial SP.
//
// This number will be negative.
int CodeGenInterface::genCallerSPtoInitialSPdelta() const
{
int callerSPtoSPdelta = 0;
callerSPtoSPdelta -= genTotalFrameSize();
assert(callerSPtoSPdelta <= 0);
return callerSPtoSPdelta;
}
//---------------------------------------------------------------------
// SetSaveFpLrWithAllCalleeSavedRegisters - Set the variable that indicates if FP/LR registers
// are stored with the rest of the callee-saved registers.
//
void CodeGen::SetSaveFpLrWithAllCalleeSavedRegisters(bool value)
{
JITDUMP("Setting genSaveFpLrWithAllCalleeSavedRegisters to %s\n", dspBool(value));
genSaveFpLrWithAllCalleeSavedRegisters = value;
}
//---------------------------------------------------------------------
// IsSaveFpLrWithAllCalleeSavedRegisters - Return the value that indicates where FP/LR registers
// are stored in the prolog.
//
bool CodeGen::IsSaveFpLrWithAllCalleeSavedRegisters() const
{
return genSaveFpLrWithAllCalleeSavedRegisters;
}
/*****************************************************************************
* Emit a call to a helper function.
*
*/
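// A typical call site (purely illustrative) is:
//   genEmitHelperCall(CORINFO_HELP_STOP_FOR_GC, 0, EA_UNKNOWN);
// For helpers whose address must be loaded indirectly, the adrp/add/ldr sequence shown
// in the comment below is emitted and the call is made through the target register.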
void CodeGen::genEmitHelperCall(unsigned helper, int argSize, emitAttr retSize, regNumber callTargetReg /*= REG_NA */)
{
void* addr = nullptr;
void* pAddr = nullptr;
emitter::EmitCallType callType = emitter::EC_FUNC_TOKEN;
addr = compiler->compGetHelperFtn((CorInfoHelpFunc)helper, &pAddr);
regNumber callTarget = REG_NA;
if (addr == nullptr)
{
// This is call to a runtime helper.
// adrp x, [reloc:rel page addr]
// add x, x, [reloc:page offset]
// ldr x, [x]
// br x
if (callTargetReg == REG_NA)
{
// If a callTargetReg has not been explicitly provided, we will use REG_DEFAULT_HELPER_CALL_TARGET, but
// this is only a valid assumption if the helper call is known to kill REG_DEFAULT_HELPER_CALL_TARGET.
callTargetReg = REG_DEFAULT_HELPER_CALL_TARGET;
}
regMaskTP callTargetMask = genRegMask(callTargetReg);
regMaskTP callKillSet = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
// assert that all registers in callTargetMask are in the callKillSet
noway_assert((callTargetMask & callKillSet) == callTargetMask);
callTarget = callTargetReg;
// adrp + add with relocations will be emitted
GetEmitter()->emitIns_R_AI(INS_adrp, EA_PTR_DSP_RELOC, callTarget,
(ssize_t)pAddr DEBUGARG((size_t)compiler->eeFindHelper(helper))
DEBUGARG(GTF_ICON_METHOD_HDL));
GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, callTarget, callTarget);
callType = emitter::EC_INDIR_R;
}
GetEmitter()->emitIns_Call(callType, compiler->eeFindHelper(helper), INDEBUG_LDISASM_COMMA(nullptr) addr, argSize,
retSize, EA_UNKNOWN, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur, DebugInfo(), callTarget, /* ireg */
REG_NA, 0, 0, /* xreg, xmul, disp */
false /* isJump */
);
regMaskTP killMask = compiler->compHelperCallKillSet((CorInfoHelpFunc)helper);
regSet.verifyRegistersUsed(killMask);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDIntrinsic: Generate code for a SIMD Intrinsic. This is the main
// routine which in turn calls appropriate genSIMDIntrinsicXXX() routine.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
// Notes:
// Currently, we only recognize SIMDVector<float> and SIMDVector<int>, and
// a limited set of methods.
//
// TODO-CLEANUP Merge all versions of this function and move to new file simdcodegencommon.cpp.
void CodeGen::genSIMDIntrinsic(GenTreeSIMD* simdNode)
{
// NYI for unsupported base types
if (!varTypeIsArithmetic(simdNode->GetSimdBaseType()))
{
noway_assert(!"SIMD intrinsic with unsupported base type.");
}
switch (simdNode->GetSIMDIntrinsicId())
{
case SIMDIntrinsicInit:
genSIMDIntrinsicInit(simdNode);
break;
case SIMDIntrinsicInitN:
genSIMDIntrinsicInitN(simdNode);
break;
case SIMDIntrinsicCast:
genSIMDIntrinsicUnOp(simdNode);
break;
case SIMDIntrinsicSub:
case SIMDIntrinsicBitwiseAnd:
case SIMDIntrinsicBitwiseOr:
case SIMDIntrinsicEqual:
genSIMDIntrinsicBinOp(simdNode);
break;
case SIMDIntrinsicUpperSave:
genSIMDIntrinsicUpperSave(simdNode);
break;
case SIMDIntrinsicUpperRestore:
genSIMDIntrinsicUpperRestore(simdNode);
break;
default:
noway_assert(!"Unimplemented SIMD intrinsic.");
unreached();
}
}
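//------------------------------------------------------------------------
// genGetSimdInsOpt: map a vector register size and element type to the Advanced SIMD
// arrangement specifier, e.g. (EA_16BYTE, TYP_FLOAT) -> INS_OPTS_4S and
// (EA_8BYTE, TYP_SHORT) -> INS_OPTS_4H (examples only; the switch below is authoritative).
//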
insOpts CodeGen::genGetSimdInsOpt(emitAttr size, var_types elementType)
{
assert((size == EA_16BYTE) || (size == EA_8BYTE));
insOpts result = INS_OPTS_NONE;
switch (elementType)
{
case TYP_DOUBLE:
case TYP_ULONG:
case TYP_LONG:
result = (size == EA_16BYTE) ? INS_OPTS_2D : INS_OPTS_1D;
break;
case TYP_FLOAT:
case TYP_UINT:
case TYP_INT:
result = (size == EA_16BYTE) ? INS_OPTS_4S : INS_OPTS_2S;
break;
case TYP_USHORT:
case TYP_SHORT:
result = (size == EA_16BYTE) ? INS_OPTS_8H : INS_OPTS_4H;
break;
case TYP_UBYTE:
case TYP_BYTE:
result = (size == EA_16BYTE) ? INS_OPTS_16B : INS_OPTS_8B;
break;
default:
assert(!"Unsupported element type");
unreached();
}
return result;
}
// getOpForSIMDIntrinsic: return the opcode for the given SIMD Intrinsic
//
// Arguments:
// intrinsicId - SIMD intrinsic Id
// baseType - Base type of the SIMD vector
//    ival - Out param. Any immediate byte operand that needs to be passed to the instruction (unused on Arm64)
//
//
// Return Value:
// Instruction (op) to be used, and immed is set if instruction requires an immediate operand.
//
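// For example (illustrative): (SIMDIntrinsicEqual, TYP_FLOAT) maps to INS_fcmeq,
// while (SIMDIntrinsicSub, TYP_INT) maps to INS_sub.
//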
instruction CodeGen::getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival /*=nullptr*/)
{
instruction result = INS_invalid;
if (varTypeIsFloating(baseType))
{
switch (intrinsicId)
{
case SIMDIntrinsicBitwiseAnd:
result = INS_and;
break;
case SIMDIntrinsicBitwiseOr:
result = INS_orr;
break;
case SIMDIntrinsicCast:
result = INS_mov;
break;
case SIMDIntrinsicEqual:
result = INS_fcmeq;
break;
case SIMDIntrinsicSub:
result = INS_fsub;
break;
default:
assert(!"Unsupported SIMD intrinsic");
unreached();
}
}
else
{
bool isUnsigned = varTypeIsUnsigned(baseType);
switch (intrinsicId)
{
case SIMDIntrinsicBitwiseAnd:
result = INS_and;
break;
case SIMDIntrinsicBitwiseOr:
result = INS_orr;
break;
case SIMDIntrinsicCast:
result = INS_mov;
break;
case SIMDIntrinsicEqual:
result = INS_cmeq;
break;
case SIMDIntrinsicSub:
result = INS_sub;
break;
default:
assert(!"Unsupported SIMD intrinsic");
unreached();
}
}
noway_assert(result != INS_invalid);
return result;
}
//------------------------------------------------------------------------
// genSIMDIntrinsicInit: Generate code for SIMD Intrinsic Initialize.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
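// For illustration (registers are placeholders): broadcasting a general register into a
// 16-byte vector of ints emits "dup v16.4s, w0"; a contained integral-zero initializer
// broadcasts from the zero register instead.
//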
void CodeGen::genSIMDIntrinsicInit(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInit);
GenTree* op1 = simdNode->Op(1);
var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
genConsumeMultiOpOperands(simdNode);
regNumber op1Reg = op1->IsIntegralConst(0) ? REG_ZR : op1->GetRegNum();
// TODO-ARM64-CQ Add LD1R to allow SIMDIntrinsicInit from contained memory
// TODO-ARM64-CQ Add MOVI to allow SIMDIntrinsicInit from contained immediate small constants
assert(op1->isContained() == op1->IsIntegralConst(0));
assert(!op1->isUsedFromMemory());
assert(genIsValidFloatReg(targetReg));
assert(genIsValidIntReg(op1Reg) || genIsValidFloatReg(op1Reg));
emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
if (opt == INS_OPTS_1D)
{
GetEmitter()->emitIns_Mov(INS_mov, attr, targetReg, op1Reg, /* canSkip */ false);
}
else if (genIsValidIntReg(op1Reg))
{
GetEmitter()->emitIns_R_R(INS_dup, attr, targetReg, op1Reg, opt);
}
else
{
GetEmitter()->emitIns_R_R_I(INS_dup, attr, targetReg, op1Reg, 0, opt);
}
genProduceReg(simdNode);
}
//-------------------------------------------------------------------------------------------
// genSIMDIntrinsicInitN: Generate code for SIMD Intrinsic Initialize for the form that takes
// a number of arguments equal to the length of the Vector.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
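// For illustration (registers are placeholders): a Vector3 built from three float
// operands is emitted roughly as
//   movi v16.16b, #0       ; clear, since 3 * 4 bytes < 16 bytes
//   ins  v16.s[0], v0.s[0]
//   ins  v16.s[1], v1.s[0]
//   ins  v16.s[2], v2.s[0]
//   mov  v17.16b, v16.16b  ; copy the temp into the target (skipped when equal)
//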
void CodeGen::genSIMDIntrinsicInitN(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInitN);
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
var_types baseType = simdNode->GetSimdBaseType();
emitAttr baseTypeSize = emitTypeSize(baseType);
regNumber vectorReg = targetReg;
size_t initCount = simdNode->GetOperandCount();
assert((initCount * baseTypeSize) <= simdNode->GetSimdSize());
if (varTypeIsFloating(baseType))
{
// Note that we cannot use targetReg before consuming all float source operands.
// Therefore use an internal temp register
vectorReg = simdNode->GetSingleTempReg(RBM_ALLFLOAT);
}
// We will first consume the list items in execution (left to right) order,
// and record the registers.
regNumber operandRegs[FP_REGSIZE_BYTES];
for (size_t i = 1; i <= initCount; i++)
{
GenTree* operand = simdNode->Op(i);
assert(operand->TypeIs(baseType));
assert(!operand->isContained());
operandRegs[i - 1] = genConsumeReg(operand);
}
if (initCount * baseTypeSize < EA_16BYTE)
{
GetEmitter()->emitIns_R_I(INS_movi, EA_16BYTE, vectorReg, 0x00, INS_OPTS_16B);
}
if (varTypeIsIntegral(baseType))
{
for (unsigned i = 0; i < initCount; i++)
{
GetEmitter()->emitIns_R_R_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i);
}
}
else
{
for (unsigned i = 0; i < initCount; i++)
{
GetEmitter()->emitIns_R_R_I_I(INS_ins, baseTypeSize, vectorReg, operandRegs[i], i, 0);
}
}
// Load the initialized value.
GetEmitter()->emitIns_Mov(INS_mov, EA_16BYTE, targetReg, vectorReg, /* canSkip */ true);
genProduceReg(simdNode);
}
//----------------------------------------------------------------------------------
// genSIMDIntrinsicUnOp: Generate code for SIMD Intrinsic unary operations like sqrt.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
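// For illustration (registers are placeholders): a SIMDIntrinsicCast is just a vector
// register copy, "mov v16.16b, v17.16b".
//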
void CodeGen::genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicCast);
GenTree* op1 = simdNode->Op(1);
var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
genConsumeMultiOpOperands(simdNode);
regNumber op1Reg = op1->GetRegNum();
assert(genIsValidFloatReg(op1Reg));
assert(genIsValidFloatReg(targetReg));
instruction ins = getOpForSIMDIntrinsic(simdNode->GetSIMDIntrinsicId(), baseType);
emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
if (GetEmitter()->IsMovInstruction(ins))
{
GetEmitter()->emitIns_Mov(ins, attr, targetReg, op1Reg, /* canSkip */ false, INS_OPTS_NONE);
}
else
{
GetEmitter()->emitIns_R_R(ins, attr, targetReg, op1Reg, genGetSimdInsOpt(attr, baseType));
}
genProduceReg(simdNode);
}
//--------------------------------------------------------------------------------
// genSIMDIntrinsicBinOp: Generate code for SIMD Intrinsic binary operations
//    sub, bit-wise And, bit-wise Or, and Equal.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
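// For illustration (registers are placeholders): a float subtraction emits
// "fsub v16.4s, v17.4s, v18.4s" and an int equality compare emits
// "cmeq v16.4s, v17.4s, v18.4s".
//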
void CodeGen::genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode)
{
assert((simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicSub) ||
(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicBitwiseAnd) ||
(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicBitwiseOr) ||
(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicEqual));
GenTree* op1 = simdNode->Op(1);
GenTree* op2 = simdNode->Op(2);
var_types baseType = simdNode->GetSimdBaseType();
regNumber targetReg = simdNode->GetRegNum();
assert(targetReg != REG_NA);
var_types targetType = simdNode->TypeGet();
genConsumeMultiOpOperands(simdNode);
regNumber op1Reg = op1->GetRegNum();
regNumber op2Reg = op2->GetRegNum();
assert(genIsValidFloatReg(op1Reg));
assert(genIsValidFloatReg(op2Reg));
assert(genIsValidFloatReg(targetReg));
    // TODO-ARM64-CQ Contain integer constants where possible
instruction ins = getOpForSIMDIntrinsic(simdNode->GetSIMDIntrinsicId(), baseType);
emitAttr attr = (simdNode->GetSimdSize() > 8) ? EA_16BYTE : EA_8BYTE;
insOpts opt = genGetSimdInsOpt(attr, baseType);
GetEmitter()->emitIns_R_R_R(ins, attr, targetReg, op1Reg, op2Reg, opt);
genProduceReg(simdNode);
}
//-----------------------------------------------------------------------------
// genSIMDIntrinsicUpperSave: save the upper half of a TYP_SIMD16 vector to
// the given register, if any, or to memory.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
// Notes:
// The upper half of all SIMD registers are volatile, even the callee-save registers.
// When a 16-byte SIMD value is live across a call, the register allocator will use this intrinsic
// to cause the upper half to be saved. It will first attempt to find another, unused, callee-save
// register. If such a register cannot be found, it will save it to an available caller-save register.
// In that case, this node will be marked GTF_SPILL, which will cause this method to save
// the upper half to the lclVar's home location.
//
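// For illustration (registers are placeholders): the upper half is saved with
// "mov v8.d[0], v16.d[1]"; if the node is marked GTF_SPILL, it is then stored to the
// upper 8 bytes of the lclVar's home with an 8-byte str.
//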
void CodeGen::genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicUpperSave);
GenTree* op1 = simdNode->Op(1);
GenTreeLclVar* lclNode = op1->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(emitTypeSize(varDsc->GetRegisterType(lclNode)) == 16);
regNumber targetReg = simdNode->GetRegNum();
regNumber op1Reg = genConsumeReg(op1);
assert(op1Reg != REG_NA);
assert(targetReg != REG_NA);
GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, targetReg, op1Reg, 0, 1);
if ((simdNode->gtFlags & GTF_SPILL) != 0)
{
// This is not a normal spill; we'll spill it to the lclVar location.
// The localVar must have a stack home.
unsigned varNum = lclNode->GetLclNum();
assert(varDsc->lvOnFrame);
// We want to store this to the upper 8 bytes of this localVar's home.
int offset = 8;
emitAttr attr = emitTypeSize(TYP_SIMD8);
GetEmitter()->emitIns_S_R(INS_str, attr, targetReg, varNum, offset);
}
else
{
genProduceReg(simdNode);
}
}
//-----------------------------------------------------------------------------
// genSIMDIntrinsicUpperRestore: Restore the upper half of a TYP_SIMD16 vector to
// the given register, if any, or to memory.
//
// Arguments:
// simdNode - The GT_SIMD node
//
// Return Value:
// None.
//
// Notes:
// For consistency with genSIMDIntrinsicUpperSave, and to ensure that lclVar nodes always
// have their home register, this node has its targetReg on the lclVar child, and its source
// on the simdNode.
// Regarding spill, please see the note above on genSIMDIntrinsicUpperSave. If we have spilled
// an upper-half to the lclVar's home location, this node will be marked GTF_SPILLED.
//
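// For illustration (registers are placeholders): the upper half is restored with
// "mov v16.d[1], v8.d[0]", preceded by an 8-byte ldr from the lclVar's home when the
// value was spilled (GTF_SPILLED).
//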
void CodeGen::genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode)
{
assert(simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicUpperRestore);
GenTree* op1 = simdNode->Op(1);
assert(op1->IsLocal());
GenTreeLclVar* lclNode = op1->AsLclVar();
LclVarDsc* varDsc = compiler->lvaGetDesc(lclNode);
assert(emitTypeSize(varDsc->GetRegisterType(lclNode)) == 16);
regNumber srcReg = simdNode->GetRegNum();
regNumber lclVarReg = genConsumeReg(lclNode);
unsigned varNum = lclNode->GetLclNum();
assert(lclVarReg != REG_NA);
assert(srcReg != REG_NA);
if (simdNode->gtFlags & GTF_SPILLED)
{
// The localVar must have a stack home.
assert(varDsc->lvOnFrame);
// We will load this from the upper 8 bytes of this localVar's home.
int offset = 8;
emitAttr attr = emitTypeSize(TYP_SIMD8);
GetEmitter()->emitIns_R_S(INS_ldr, attr, srcReg, varNum, offset);
}
GetEmitter()->emitIns_R_R_I_I(INS_mov, EA_8BYTE, lclVarReg, srcReg, 1, 0);
}
//-----------------------------------------------------------------------------
// genStoreIndTypeSIMD12: store indirect a TYP_SIMD12 (i.e. Vector3) to memory.
//    Since Vector3 is not a hardware-supported write size, it is performed
//    as two writes: an 8-byte write followed by a 4-byte write.
//
// Arguments:
// treeNode - tree node that is attempting to store indirect
//
//
// Return Value:
// None.
//
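// For illustration (registers are placeholders), the store expands to roughly:
//   str d16, [x0]        ; lower 8 bytes
//   mov w1, v16.s[2]     ; extract the third element
//   str w1, [x0, #8]     ; upper 4 bytes
//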
void CodeGen::genStoreIndTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_STOREIND);
GenTree* addr = treeNode->AsOp()->gtOp1;
GenTree* data = treeNode->AsOp()->gtOp2;
// addr and data should not be contained.
assert(!data->isContained());
assert(!addr->isContained());
#ifdef DEBUG
// Should not require a write barrier
GCInfo::WriteBarrierForm writeBarrierForm = gcInfo.gcIsWriteBarrierCandidate(treeNode, data);
assert(writeBarrierForm == GCInfo::WBF_NoBarrier);
#endif
genConsumeOperands(treeNode->AsOp());
// Need an additional integer register to extract upper 4 bytes from data.
regNumber tmpReg = treeNode->GetSingleTempReg();
assert(tmpReg != addr->GetRegNum());
// 8-byte write
GetEmitter()->emitIns_R_R(INS_str, EA_8BYTE, data->GetRegNum(), addr->GetRegNum());
// Extract upper 4-bytes from data
GetEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, tmpReg, data->GetRegNum(), 2);
// 4-byte write
GetEmitter()->emitIns_R_R_I(INS_str, EA_4BYTE, tmpReg, addr->GetRegNum(), 8);
}
//-----------------------------------------------------------------------------
// genLoadIndTypeSIMD12: load indirect a TYP_SIMD12 (i.e. Vector3) value.
//    Since Vector3 is not a hardware-supported read size, it is performed
//    as two loads: an 8-byte load followed by a 4-byte load.
//
// Arguments:
// treeNode - tree node of GT_IND
//
//
// Return Value:
// None.
//
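// For illustration (registers are placeholders), the load expands to roughly:
//   ldr d16, [x0]        ; lower 8 bytes
//   ldr w1, [x0, #8]     ; upper 4 bytes
//   mov v16.s[2], w1     ; insert the third element
//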
void CodeGen::genLoadIndTypeSIMD12(GenTree* treeNode)
{
assert(treeNode->OperGet() == GT_IND);
GenTree* addr = treeNode->AsOp()->gtOp1;
regNumber targetReg = treeNode->GetRegNum();
assert(!addr->isContained());
regNumber operandReg = genConsumeReg(addr);
    // Need an additional integer register to read upper 4 bytes, which is different from targetReg
regNumber tmpReg = treeNode->GetSingleTempReg();
// 8-byte read
GetEmitter()->emitIns_R_R(INS_ldr, EA_8BYTE, targetReg, addr->GetRegNum());
// 4-byte read
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, addr->GetRegNum(), 8);
// Insert upper 4-bytes into data
GetEmitter()->emitIns_R_R_I(INS_mov, EA_4BYTE, targetReg, tmpReg, 2);
genProduceReg(treeNode);
}
//-----------------------------------------------------------------------------
// genStoreLclTypeSIMD12: store a TYP_SIMD12 (i.e. Vector3) type field.
//    Since Vector3 is not a hardware-supported write size, it is performed
//    as two stores: an 8-byte store followed by a 4-byte store.
//
// Arguments:
// treeNode - tree node that is attempting to store TYP_SIMD12 field
//
// Return Value:
// None.
//
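// For illustration (offsets are placeholders): a contained zero initializer stores
// straight from the zero register ("str xzr, [fp, #offs]" then "str wzr, [fp, #offs+8]");
// otherwise the vector register is split via emitStoreSIMD12ToLclOffset.
//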
void CodeGen::genStoreLclTypeSIMD12(GenTree* treeNode)
{
assert((treeNode->OperGet() == GT_STORE_LCL_FLD) || (treeNode->OperGet() == GT_STORE_LCL_VAR));
GenTreeLclVarCommon* lclVar = treeNode->AsLclVarCommon();
unsigned offs = lclVar->GetLclOffs();
unsigned varNum = lclVar->GetLclNum();
assert(varNum < compiler->lvaCount);
GenTree* op1 = lclVar->gtGetOp1();
if (op1->isContained())
{
// This is only possible for a zero-init.
assert(op1->IsIntegralConst(0) || op1->IsSIMDZero());
// store lower 8 bytes
GetEmitter()->emitIns_S_R(ins_Store(TYP_DOUBLE), EA_8BYTE, REG_ZR, varNum, offs);
// Store upper 4 bytes
GetEmitter()->emitIns_S_R(ins_Store(TYP_FLOAT), EA_4BYTE, REG_ZR, varNum, offs + 8);
return;
}
regNumber operandReg = genConsumeReg(op1);
// Need an additional integer register to extract upper 4 bytes from data.
regNumber tmpReg = lclVar->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(varNum, offs, operandReg, tmpReg);
}
#endif // FEATURE_SIMD
#ifdef PROFILING_SUPPORTED
//-----------------------------------------------------------------------------------
// genProfilingEnterCallback: Generate the profiling function enter callback.
//
// Arguments:
// initReg - register to use as scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed set to 'false' if 'initReg' is
// set to non-zero value after this call.
//
// Return Value:
// None
//
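// For illustration (registers are placeholders): the method handle is loaded into the
// first profiler argument register (through an extra ldr when it is indirected), the
// caller's SP is computed into the second argument register with an add from the frame
// pointer, and CORINFO_HELP_PROF_FCN_ENTER is called.
//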
void CodeGen::genProfilingEnterCallback(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
if (compiler->compProfilerMethHndIndirected)
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, REG_PROFILER_ENTER_ARG_FUNC_ID,
(ssize_t)compiler->compProfilerMethHnd);
GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_FUNC_ID, REG_PROFILER_ENTER_ARG_FUNC_ID);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_ENTER_ARG_FUNC_ID, (ssize_t)compiler->compProfilerMethHnd);
}
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_PROFILER_ENTER_ARG_CALLER_SP, genFramePointerReg(),
(ssize_t)(-callerSPOffset), REG_PROFILER_ENTER_ARG_CALLER_SP);
genEmitHelperCall(CORINFO_HELP_PROF_FCN_ENTER, 0, EA_UNKNOWN);
if ((genRegMask(initReg) & RBM_PROFILER_ENTER_TRASH) != RBM_NONE)
{
*pInitRegZeroed = false;
}
}
//-----------------------------------------------------------------------------------
// genProfilingLeaveCallback: Generate the profiling function leave or tailcall callback.
// Technically, this is not part of the epilog; it is called when we are generating code for a GT_RETURN node.
//
// Arguments:
// helper - which helper to call. Either CORINFO_HELP_PROF_FCN_LEAVE or CORINFO_HELP_PROF_FCN_TAILCALL
//
// Return Value:
// None
//
void CodeGen::genProfilingLeaveCallback(unsigned helper)
{
assert((helper == CORINFO_HELP_PROF_FCN_LEAVE) || (helper == CORINFO_HELP_PROF_FCN_TAILCALL));
if (!compiler->compIsProfilerHookNeeded())
{
return;
}
compiler->info.compProfilerCallback = true;
if (compiler->compProfilerMethHndIndirected)
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, REG_PROFILER_LEAVE_ARG_FUNC_ID,
(ssize_t)compiler->compProfilerMethHnd);
GetEmitter()->emitIns_R_R(INS_ldr, EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_FUNC_ID, REG_PROFILER_LEAVE_ARG_FUNC_ID);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_FUNC_ID, (ssize_t)compiler->compProfilerMethHnd);
}
gcInfo.gcMarkRegSetNpt(RBM_PROFILER_LEAVE_ARG_FUNC_ID);
int callerSPOffset = compiler->lvaToCallerSPRelativeOffset(0, isFramePointerUsed());
genInstrWithConstant(INS_add, EA_PTRSIZE, REG_PROFILER_LEAVE_ARG_CALLER_SP, genFramePointerReg(),
(ssize_t)(-callerSPOffset), REG_PROFILER_LEAVE_ARG_CALLER_SP);
gcInfo.gcMarkRegSetNpt(RBM_PROFILER_LEAVE_ARG_CALLER_SP);
genEmitHelperCall(helper, 0, EA_UNKNOWN);
}
#endif // PROFILING_SUPPORTED
/*****************************************************************************
* Unit testing of the ARM64 emitter: generate a bunch of instructions into the prolog
* (it's as good a place as any), then use COMPlus_JitLateDisasm=* to see if the late
 * disassembler thinks the instructions are the same as we do.
*/
// Uncomment "#define ALL_ARM64_EMITTER_UNIT_TESTS" to run all the unit tests here.
// After adding a unit test, and verifying it works, put it under this #ifdef, so we don't see it run every time.
//#define ALL_ARM64_EMITTER_UNIT_TESTS
#if defined(DEBUG)
void CodeGen::genArm64EmitterUnitTests()
{
if (!verbose)
{
return;
}
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// Mark the "fake" instructions in the output.
printf("*************** In genArm64EmitterUnitTests()\n");
emitter* theEmitter = GetEmitter();
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// We use this:
// genDefineTempLabel(genCreateTempLabel());
// to create artificial labels to help separate groups of tests.
//
// Loads/Stores basic general register
//
genDefineTempLabel(genCreateTempLabel());
// ldr/str Xt, [reg]
theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
// ldr/str Wt, [reg]
theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrh, EA_2BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strb, EA_1BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_strh, EA_2BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_ldrsb, EA_4BYTE, REG_R8, REG_R9); // target Wt
theEmitter->emitIns_R_R(INS_ldrsh, EA_4BYTE, REG_R8, REG_R9); // target Wt
theEmitter->emitIns_R_R(INS_ldrsb, EA_8BYTE, REG_R8, REG_R9); // target Xt
theEmitter->emitIns_R_R(INS_ldrsh, EA_8BYTE, REG_R8, REG_R9); // target Xt
theEmitter->emitIns_R_R(INS_ldrsw, EA_8BYTE, REG_R8, REG_R9); // target Xt
theEmitter->emitIns_R_R_I(INS_ldurb, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldurh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sturb, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sturh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursb, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursb, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursh, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursh, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldursw, EA_8BYTE, REG_R8, REG_R9, 1);
// SP and ZR tests
theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_R8, REG_SP, 1);
theEmitter->emitIns_R_R_I(INS_ldurb, EA_8BYTE, REG_ZR, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldurh, EA_8BYTE, REG_ZR, REG_SP, 1);
// scaled
theEmitter->emitIns_R_R_I(INS_ldrb, EA_1BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldrh, EA_2BYTE, REG_R8, REG_R9, 2);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 4);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 8);
// pre-/post-indexed (unscaled)
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_PRE_INDEX);
// ldar/stlr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldar, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldar, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldarb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldarh, EA_4BYTE, REG_R5, REG_R12);
theEmitter->emitIns_R_R(INS_stlr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_stlr, EA_4BYTE, REG_R7, REG_R13);
theEmitter->emitIns_R_R(INS_stlrb, EA_4BYTE, REG_R5, REG_R14);
theEmitter->emitIns_R_R(INS_stlrh, EA_4BYTE, REG_R3, REG_R15);
// ldapr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldapr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldapr, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldaprb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldaprh, EA_4BYTE, REG_R5, REG_R12);
// ldaxr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldaxr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldaxr, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldaxrb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldaxrh, EA_4BYTE, REG_R5, REG_R12);
// ldxr Rt, [reg]
theEmitter->emitIns_R_R(INS_ldxr, EA_8BYTE, REG_R9, REG_R8);
theEmitter->emitIns_R_R(INS_ldxr, EA_4BYTE, REG_R7, REG_R10);
theEmitter->emitIns_R_R(INS_ldxrb, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_ldxrh, EA_4BYTE, REG_R5, REG_R12);
// stxr Ws, Rt, [reg]
theEmitter->emitIns_R_R_R(INS_stxr, EA_8BYTE, REG_R1, REG_R9, REG_R8);
theEmitter->emitIns_R_R_R(INS_stxr, EA_4BYTE, REG_R3, REG_R7, REG_R13);
theEmitter->emitIns_R_R_R(INS_stxrb, EA_4BYTE, REG_R8, REG_R5, REG_R14);
theEmitter->emitIns_R_R_R(INS_stxrh, EA_4BYTE, REG_R12, REG_R3, REG_R15);
// stlxr Ws, Rt, [reg]
theEmitter->emitIns_R_R_R(INS_stlxr, EA_8BYTE, REG_R1, REG_R9, REG_R8);
theEmitter->emitIns_R_R_R(INS_stlxr, EA_4BYTE, REG_R3, REG_R7, REG_R13);
theEmitter->emitIns_R_R_R(INS_stlxrb, EA_4BYTE, REG_R8, REG_R5, REG_R14);
theEmitter->emitIns_R_R_R(INS_stlxrh, EA_4BYTE, REG_R12, REG_R3, REG_R15);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
// ld1 {Vt}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V0, REG_R1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V2, REG_R3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V4, REG_R5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V6, REG_R7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V8, REG_R9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V10, REG_R11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1, EA_8BYTE, REG_V12, REG_R13, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1, EA_16BYTE, REG_V14, REG_R15, INS_OPTS_2D);
// ld1 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_8BYTE, REG_V18, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1_2regs, EA_16BYTE, REG_V21, REG_R23, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_8BYTE, REG_V24, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1_3regs, EA_16BYTE, REG_V28, REG_SP, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_8BYTE, REG_V30, REG_R2, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1_4regs, EA_16BYTE, REG_V3, REG_R7, INS_OPTS_2D);
// ld2 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld2, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld2, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld2, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld2, EA_16BYTE, REG_V18, REG_R20, INS_OPTS_2D);
// ld3 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld3, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld3, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld3, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld3, EA_16BYTE, REG_V24, REG_R27, INS_OPTS_2D);
// ld4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld4, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld4, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld4, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld4, EA_16BYTE, REG_V30, REG_R2, INS_OPTS_2D);
// st1 {Vt}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V0, REG_R1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V2, REG_R3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V4, REG_R5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V6, REG_R7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V8, REG_R9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V10, REG_R11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1, EA_8BYTE, REG_V12, REG_R13, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1, EA_16BYTE, REG_V14, REG_R15, INS_OPTS_2D);
// st1 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_8BYTE, REG_V18, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1_2regs, EA_16BYTE, REG_V21, REG_R23, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_8BYTE, REG_V24, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1_3regs, EA_16BYTE, REG_V28, REG_SP, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_8BYTE, REG_V30, REG_R2, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_st1_4regs, EA_16BYTE, REG_V3, REG_R7, INS_OPTS_2D);
// st2 {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st2, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st2, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st2, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st2, EA_16BYTE, REG_V18, REG_R20, INS_OPTS_2D);
// st3 {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st3, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st3, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st3, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st3, EA_16BYTE, REG_V24, REG_R27, INS_OPTS_2D);
// st4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_st4, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_st4, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_st4, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_st4, EA_16BYTE, REG_V30, REG_R2, INS_OPTS_2D);
// ld1r {Vt}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V0, REG_R1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V2, REG_R3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V4, REG_R5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V6, REG_R7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V8, REG_R9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V10, REG_R11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld1r, EA_8BYTE, REG_V12, REG_R13, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld1r, EA_16BYTE, REG_V14, REG_R15, INS_OPTS_2D);
// ld2r {Vt, Vt2}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V0, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V3, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V6, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V9, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V12, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V15, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld2r, EA_8BYTE, REG_V18, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld2r, EA_16BYTE, REG_V21, REG_R23, INS_OPTS_2D);
// ld3r {Vt, Vt2, Vt3}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V0, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V4, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V8, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V12, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V16, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V20, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld3r, EA_8BYTE, REG_V24, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld3r, EA_16BYTE, REG_V28, REG_SP, INS_OPTS_2D);
// ld4r {Vt, Vt2, Vt3, Vt4}, [Xn|SP]
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V0, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V5, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V10, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V15, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V20, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V25, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ld4r, EA_8BYTE, REG_V30, REG_R2, INS_OPTS_1D);
theEmitter->emitIns_R_R(INS_ld4r, EA_16BYTE, REG_V3, REG_R7, INS_OPTS_2D);
// tbl Vd, {Vt}, Vm
theEmitter->emitIns_R_R_R(INS_tbl, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt}, Vm
theEmitter->emitIns_R_R_R(INS_tbx, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbl Vd, {Vt, Vt2}, Vm
theEmitter->emitIns_R_R_R(INS_tbl_2regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl_2regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt, Vt2}, Vm
theEmitter->emitIns_R_R_R(INS_tbx_2regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx_2regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbl Vd, {Vt, Vt2, Vt3}, Vm
theEmitter->emitIns_R_R_R(INS_tbl_3regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl_3regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt, Vt2, Vt3}, Vm
theEmitter->emitIns_R_R_R(INS_tbx_3regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx_3regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbl Vd, {Vt, Vt2, Vt3, Vt4}, Vm
theEmitter->emitIns_R_R_R(INS_tbl_4regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbl_4regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
// tbx Vd, {Vt, Vt2, Vt3, Vt4}, Vm
theEmitter->emitIns_R_R_R(INS_tbx_4regs, EA_8BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_tbx_4regs, EA_16BYTE, REG_V0, REG_V1, REG_V6, INS_OPTS_16B);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
// ld1 {Vt}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V0, REG_R1, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V3, REG_R4, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V6, REG_R7, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V9, REG_R10, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V12, REG_R13, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V15, REG_R16, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1, EA_8BYTE, REG_V18, REG_R19, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1, EA_16BYTE, REG_V21, REG_R22, REG_R23, INS_OPTS_2D);
// ld1 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_8BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1_2regs, EA_16BYTE, REG_V28, REG_SP, REG_R30, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_8BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1_3regs, EA_16BYTE, REG_V2, REG_R5, REG_R6, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_8BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1_4regs, EA_16BYTE, REG_V10, REG_R14, REG_R15, INS_OPTS_2D);
// ld2 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld2, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld2, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld2, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld2, EA_16BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_2D);
// ld3 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld3, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld3, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld3, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld3, EA_16BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_2D);
// ld4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld4, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld4, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld4, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld4, EA_16BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_2D);
// st1 {Vt}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V0, REG_R1, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V3, REG_R4, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V6, REG_R7, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V9, REG_R10, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V12, REG_R13, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V15, REG_R16, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1, EA_8BYTE, REG_V18, REG_R19, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1, EA_16BYTE, REG_V21, REG_R22, REG_R23, INS_OPTS_2D);
// st1 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_8BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1_2regs, EA_16BYTE, REG_V28, REG_SP, REG_R30, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_8BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1_3regs, EA_16BYTE, REG_V2, REG_R5, REG_R6, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_8BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_st1_4regs, EA_16BYTE, REG_V10, REG_R14, REG_R15, INS_OPTS_2D);
// st2 {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st2, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st2, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st2, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st2, EA_16BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_2D);
// st3 {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st3, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st3, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st3, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st3, EA_16BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_2D);
// st4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_st4, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_st4, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_st4, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_st4, EA_16BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_2D);
// ld1r {Vt}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V0, REG_R1, REG_R2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V3, REG_R4, REG_R5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V6, REG_R7, REG_R8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V9, REG_R10, REG_R11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V12, REG_R13, REG_R14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V15, REG_R16, REG_R17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_8BYTE, REG_V18, REG_R19, REG_R20, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld1r, EA_16BYTE, REG_V21, REG_R22, REG_R23, INS_OPTS_2D);
// ld2r {Vt, Vt2}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V0, REG_R2, REG_R3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V4, REG_R6, REG_R7, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V8, REG_R10, REG_R11, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V12, REG_R14, REG_R15, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V16, REG_R18, REG_R19, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V20, REG_R22, REG_R23, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_8BYTE, REG_V24, REG_R26, REG_R27, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld2r, EA_16BYTE, REG_V28, REG_SP, REG_R30, INS_OPTS_2D);
// ld3r {Vt, Vt2, Vt3}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V0, REG_R3, REG_R4, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V5, REG_R8, REG_R9, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V10, REG_R13, REG_R14, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V15, REG_R18, REG_R19, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V20, REG_R23, REG_R24, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V25, REG_R28, REG_R29, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_8BYTE, REG_V30, REG_R0, REG_R1, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld3r, EA_16BYTE, REG_V2, REG_R5, REG_R6, INS_OPTS_2D);
// ld4r {Vt, Vt2, Vt3, Vt4}, [Xn|SP], Xm
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V0, REG_R4, REG_R5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V6, REG_R10, REG_R11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V12, REG_R16, REG_R17, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V18, REG_R22, REG_R23, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V24, REG_R28, REG_R29, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V30, REG_R2, REG_R3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_8BYTE, REG_V4, REG_R8, REG_R9, INS_OPTS_1D);
theEmitter->emitIns_R_R_R(INS_ld4r, EA_16BYTE, REG_V10, REG_R14, REG_R15, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
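// Post-indexed by an immediate: for the whole-register forms the #imm written back
// to Xn|SP equals the total number of bytes transferred (regs * 8 for the 64-bit
// forms, regs * 16 for the 128-bit forms), as exercised below.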
// ld1 {Vt}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V0, REG_R1, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V2, REG_R3, 16, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V4, REG_R5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V6, REG_R7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V8, REG_R9, 8, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V10, REG_R11, 16, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V12, REG_R13, 8, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1, EA_16BYTE, REG_V14, REG_R15, 16, INS_OPTS_2D);
// ld1 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_8BYTE, REG_V18, REG_R20, 16, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1_2regs, EA_16BYTE, REG_V21, REG_R23, 32, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_8BYTE, REG_V24, REG_R27, 24, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1_3regs, EA_16BYTE, REG_V28, REG_SP, 48, INS_OPTS_2D);
// ld1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_8BYTE, REG_V30, REG_R2, 32, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1_4regs, EA_16BYTE, REG_V3, REG_R7, 64, INS_OPTS_2D);
// ld2 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld2, EA_16BYTE, REG_V18, REG_R20, 32, INS_OPTS_2D);
// ld3 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld3, EA_16BYTE, REG_V24, REG_R27, 48, INS_OPTS_2D);
// ld4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld4, EA_16BYTE, REG_V30, REG_R2, 64, INS_OPTS_2D);
// st1 {Vt}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V0, REG_R1, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V2, REG_R3, 16, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V4, REG_R5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V6, REG_R7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V8, REG_R9, 8, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V10, REG_R11, 16, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V12, REG_R13, 8, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1, EA_16BYTE, REG_V14, REG_R15, 16, INS_OPTS_2D);
// st1 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_8BYTE, REG_V18, REG_R20, 16, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1_2regs, EA_16BYTE, REG_V21, REG_R23, 32, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_8BYTE, REG_V24, REG_R27, 24, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1_3regs, EA_16BYTE, REG_V28, REG_SP, 48, INS_OPTS_2D);
// st1 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_8BYTE, REG_V30, REG_R2, 32, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_st1_4regs, EA_16BYTE, REG_V3, REG_R7, 64, INS_OPTS_2D);
// st2 {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V0, REG_R2, 16, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V3, REG_R5, 32, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V6, REG_R8, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V9, REG_R11, 32, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V12, REG_R14, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V15, REG_R17, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st2, EA_16BYTE, REG_V18, REG_R20, 32, INS_OPTS_2D);
// st3 {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V0, REG_R3, 24, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V4, REG_R7, 48, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V8, REG_R11, 24, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V12, REG_R15, 48, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V16, REG_R19, 24, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V20, REG_R23, 48, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st3, EA_16BYTE, REG_V24, REG_R27, 48, INS_OPTS_2D);
// st4 {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V0, REG_R4, 32, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V5, REG_R9, 64, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V10, REG_R14, 32, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V15, REG_R19, 64, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V20, REG_R24, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V25, REG_R29, 64, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_st4, EA_16BYTE, REG_V30, REG_R2, 64, INS_OPTS_2D);
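// For the replicate forms (ld1r..ld4r) the post-index immediate is the element size
// in bytes times the number of registers in the list.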
// ld1r {Vt}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V0, REG_R1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V2, REG_R3, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V4, REG_R5, 2, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V6, REG_R7, 2, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V8, REG_R9, 4, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V10, REG_R11, 4, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_8BYTE, REG_V12, REG_R13, 8, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld1r, EA_16BYTE, REG_V14, REG_R15, 8, INS_OPTS_2D);
// ld2r {Vt, Vt2}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V0, REG_R2, 2, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V3, REG_R5, 2, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V6, REG_R8, 4, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V9, REG_R11, 4, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V12, REG_R14, 8, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V15, REG_R17, 8, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_8BYTE, REG_V18, REG_R20, 16, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld2r, EA_16BYTE, REG_V21, REG_R23, 16, INS_OPTS_2D);
// ld3r {Vt, Vt2, Vt3}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V0, REG_R3, 3, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V4, REG_R7, 3, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V8, REG_R11, 6, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V12, REG_R15, 6, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V16, REG_R19, 12, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V20, REG_R23, 12, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_8BYTE, REG_V24, REG_R27, 24, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld3r, EA_16BYTE, REG_V28, REG_SP, 24, INS_OPTS_2D);
// ld4r {Vt, Vt2, Vt3, Vt4}, [Xn|SP], #imm
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V0, REG_R4, 4, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V5, REG_R9, 4, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V10, REG_R14, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V15, REG_R19, 8, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V20, REG_R24, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V25, REG_R29, 16, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_8BYTE, REG_V30, REG_R2, 32, INS_OPTS_1D);
theEmitter->emitIns_R_R_I(INS_ld4r, EA_16BYTE, REG_V3, REG_R7, 32, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
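// Single structure (lane) forms: the EA size selects the element size and the trailing
// immediate is the lane index (0-15 for B, 0-7 for H, 0-3 for S, 0-1 for D).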
// ld1 {Vt}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld1, EA_1BYTE, REG_V0, REG_R1, 3);
theEmitter->emitIns_R_R_I(INS_ld1, EA_2BYTE, REG_V2, REG_R3, 2);
theEmitter->emitIns_R_R_I(INS_ld1, EA_4BYTE, REG_V4, REG_R5, 1);
theEmitter->emitIns_R_R_I(INS_ld1, EA_8BYTE, REG_V6, REG_R7, 0);
// ld2 {Vt, Vt2}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld2, EA_1BYTE, REG_V0, REG_R2, 4);
theEmitter->emitIns_R_R_I(INS_ld2, EA_2BYTE, REG_V3, REG_R5, 3);
theEmitter->emitIns_R_R_I(INS_ld2, EA_4BYTE, REG_V6, REG_R8, 2);
theEmitter->emitIns_R_R_I(INS_ld2, EA_8BYTE, REG_V9, REG_R11, 1);
// ld3 {Vt, Vt2, Vt3}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld3, EA_1BYTE, REG_V0, REG_R3, 5);
theEmitter->emitIns_R_R_I(INS_ld3, EA_2BYTE, REG_V4, REG_R7, 4);
theEmitter->emitIns_R_R_I(INS_ld3, EA_4BYTE, REG_V8, REG_R11, 3);
theEmitter->emitIns_R_R_I(INS_ld3, EA_8BYTE, REG_V12, REG_R15, 0);
// ld4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_ld4, EA_1BYTE, REG_V0, REG_R4, 6);
theEmitter->emitIns_R_R_I(INS_ld4, EA_2BYTE, REG_V5, REG_R9, 5);
theEmitter->emitIns_R_R_I(INS_ld4, EA_4BYTE, REG_V10, REG_R14, 0);
theEmitter->emitIns_R_R_I(INS_ld4, EA_8BYTE, REG_V15, REG_R19, 1);
// st1 {Vt}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st1, EA_1BYTE, REG_V0, REG_R1, 7);
theEmitter->emitIns_R_R_I(INS_st1, EA_2BYTE, REG_V2, REG_R3, 6);
theEmitter->emitIns_R_R_I(INS_st1, EA_4BYTE, REG_V4, REG_R5, 1);
theEmitter->emitIns_R_R_I(INS_st1, EA_8BYTE, REG_V6, REG_R7, 0);
// st2 {Vt, Vt2}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st2, EA_1BYTE, REG_V0, REG_R2, 8);
theEmitter->emitIns_R_R_I(INS_st2, EA_2BYTE, REG_V3, REG_R5, 7);
theEmitter->emitIns_R_R_I(INS_st2, EA_4BYTE, REG_V6, REG_R8, 2);
theEmitter->emitIns_R_R_I(INS_st2, EA_8BYTE, REG_V9, REG_R11, 1);
// st3 {Vt, Vt2, Vt3}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st3, EA_1BYTE, REG_V0, REG_R3, 9);
theEmitter->emitIns_R_R_I(INS_st3, EA_2BYTE, REG_V4, REG_R7, 0);
theEmitter->emitIns_R_R_I(INS_st3, EA_4BYTE, REG_V8, REG_R11, 3);
theEmitter->emitIns_R_R_I(INS_st3, EA_8BYTE, REG_V12, REG_R15, 0);
// st4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP]
theEmitter->emitIns_R_R_I(INS_st4, EA_1BYTE, REG_V0, REG_R4, 10);
theEmitter->emitIns_R_R_I(INS_st4, EA_2BYTE, REG_V5, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_st4, EA_4BYTE, REG_V10, REG_R14, 0);
theEmitter->emitIns_R_R_I(INS_st4, EA_8BYTE, REG_V15, REG_R19, 1);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
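// Same lane forms as above, post-indexed by a register (Xm).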
// ld1 {Vt}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_1BYTE, REG_V0, REG_R1, REG_R2, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_2BYTE, REG_V3, REG_R4, REG_R5, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_4BYTE, REG_V6, REG_R7, REG_R8, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld1, EA_8BYTE, REG_V9, REG_R10, REG_R11, 0, INS_OPTS_POST_INDEX);
// ld2 {Vt, Vt2}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_1BYTE, REG_V0, REG_R2, REG_R3, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_2BYTE, REG_V4, REG_R6, REG_R7, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_4BYTE, REG_V8, REG_R10, REG_R11, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld2, EA_8BYTE, REG_V12, REG_R14, REG_R15, 1, INS_OPTS_POST_INDEX);
// ld3 {Vt, Vt2, Vt3}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_1BYTE, REG_V0, REG_R3, REG_R4, 5, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_2BYTE, REG_V5, REG_R8, REG_R9, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_4BYTE, REG_V10, REG_R13, REG_R14, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld3, EA_8BYTE, REG_V15, REG_R18, REG_R19, 0, INS_OPTS_POST_INDEX);
// ld4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_1BYTE, REG_V0, REG_R4, REG_R5, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_2BYTE, REG_V6, REG_R10, REG_R11, 5, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_4BYTE, REG_V12, REG_R16, REG_R17, 0, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ld4, EA_8BYTE, REG_V18, REG_R22, REG_R23, 1, INS_OPTS_POST_INDEX);
// st1 {Vt}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st1, EA_1BYTE, REG_V0, REG_R1, REG_R2, 7, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st1, EA_2BYTE, REG_V3, REG_R4, REG_R5, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st1, EA_4BYTE, REG_V6, REG_R7, REG_R8, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st1, EA_8BYTE, REG_V9, REG_R10, REG_R11, 0, INS_OPTS_POST_INDEX);
// st2 {Vt, Vt2}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st2, EA_1BYTE, REG_V0, REG_R2, REG_R3, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st2, EA_2BYTE, REG_V4, REG_R6, REG_R7, 7, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st2, EA_4BYTE, REG_V8, REG_R10, REG_R11, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st2, EA_8BYTE, REG_V12, REG_R14, REG_R15, 1, INS_OPTS_POST_INDEX);
// st3 {Vt, Vt2, Vt3}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st3, EA_1BYTE, REG_V0, REG_R3, REG_R4, 9, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st3, EA_2BYTE, REG_V5, REG_R8, REG_R9, 0, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st3, EA_4BYTE, REG_V10, REG_R13, REG_R14, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st3, EA_8BYTE, REG_V15, REG_R18, REG_R19, 0, INS_OPTS_POST_INDEX);
// st4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], Xm
theEmitter->emitIns_R_R_R_I(INS_st4, EA_1BYTE, REG_V0, REG_R4, REG_R5, 10, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st4, EA_2BYTE, REG_V6, REG_R10, REG_R11, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st4, EA_4BYTE, REG_V12, REG_R16, REG_R17, 0, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_st4, EA_8BYTE, REG_V18, REG_R22, REG_R23, 1, INS_OPTS_POST_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Loads to and Stores from one, two, three, or four SIMD&FP registers
//
genDefineTempLabel(genCreateTempLabel());
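// Lane forms post-indexed by an immediate: #imm equals the element size in bytes
// times the number of registers in the list.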
// ld1 {Vt}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_1BYTE, REG_V0, REG_R1, 3, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_2BYTE, REG_V2, REG_R3, 2, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_4BYTE, REG_V4, REG_R5, 1, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld1, EA_8BYTE, REG_V6, REG_R7, 0, 8, INS_OPTS_POST_INDEX);
// ld2 {Vt, Vt2}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_1BYTE, REG_V0, REG_R2, 4, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_2BYTE, REG_V3, REG_R5, 3, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_4BYTE, REG_V6, REG_R8, 2, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld2, EA_8BYTE, REG_V9, REG_R11, 1, 16, INS_OPTS_POST_INDEX);
// ld3 {Vt, Vt2, Vt3}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_1BYTE, REG_V0, REG_R3, 5, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_2BYTE, REG_V4, REG_R7, 4, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_4BYTE, REG_V8, REG_R11, 3, 12, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld3, EA_8BYTE, REG_V12, REG_R15, 0, 24, INS_OPTS_POST_INDEX);
// ld4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_1BYTE, REG_V0, REG_R4, 6, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_2BYTE, REG_V5, REG_R9, 5, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_4BYTE, REG_V10, REG_R14, 0, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_ld4, EA_8BYTE, REG_V15, REG_R19, 1, 32, INS_OPTS_POST_INDEX);
// st1 {Vt}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st1, EA_1BYTE, REG_V0, REG_R1, 3, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st1, EA_2BYTE, REG_V2, REG_R3, 2, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st1, EA_4BYTE, REG_V4, REG_R5, 1, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st1, EA_8BYTE, REG_V6, REG_R7, 0, 8, INS_OPTS_POST_INDEX);
// st2 {Vt, Vt2}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st2, EA_1BYTE, REG_V0, REG_R2, 4, 2, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st2, EA_2BYTE, REG_V3, REG_R5, 3, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st2, EA_4BYTE, REG_V6, REG_R8, 2, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st2, EA_8BYTE, REG_V9, REG_R11, 1, 16, INS_OPTS_POST_INDEX);
// st3 {Vt, Vt2, Vt3}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st3, EA_1BYTE, REG_V0, REG_R3, 5, 3, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st3, EA_2BYTE, REG_V4, REG_R7, 4, 6, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st3, EA_4BYTE, REG_V8, REG_R11, 3, 12, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st3, EA_8BYTE, REG_V12, REG_R15, 0, 24, INS_OPTS_POST_INDEX);
// st4 {Vt, Vt2, Vt3, Vt4}[#index], [Xn|SP], #imm
theEmitter->emitIns_R_R_I_I(INS_st4, EA_1BYTE, REG_V0, REG_R4, 6, 4, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st4, EA_2BYTE, REG_V5, REG_R9, 5, 8, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st4, EA_4BYTE, REG_V10, REG_R14, 0, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I_I(INS_st4, EA_8BYTE, REG_V15, REG_R19, 1, 32, INS_OPTS_POST_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Compares
//
genDefineTempLabel(genCreateTempLabel());
// cmp reg, reg
theEmitter->emitIns_R_R(INS_cmp, EA_8BYTE, REG_R8, REG_R9);
theEmitter->emitIns_R_R(INS_cmn, EA_8BYTE, REG_R8, REG_R9);
// cmp reg, imm
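// Comparison immediates are the 12-bit unsigned form, optionally shifted left by 12;
// the negative values below exercise the path that flips to the complementary
// instruction (cmp <-> cmn).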
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 0);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 4095);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 1 << 12);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 4095 << 12);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 0);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 4095);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 1 << 12);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 4095 << 12);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, -1);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, -0xfff);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 0xfffffffffffff000LL);
theEmitter->emitIns_R_I(INS_cmp, EA_8BYTE, REG_R8, 0xffffffffff800000LL);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, -1);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, -0xfff);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 0xfffffffffffff000LL);
theEmitter->emitIns_R_I(INS_cmn, EA_8BYTE, REG_R8, 0xffffffffff800000LL);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R
//
genDefineTempLabel(genCreateTempLabel());
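// Bit/byte manipulation: cls/clz count leading sign/zero bits, rbit reverses the bit
// order, and rev/rev16/rev32 reverse byte order within the full register, each
// halfword, or each word.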
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_R1, REG_R12);
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_R2, REG_R13);
theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_R3, REG_R14);
theEmitter->emitIns_R_R(INS_rev, EA_8BYTE, REG_R4, REG_R15);
theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_R5, REG_R0);
theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_R6, REG_R1);
theEmitter->emitIns_R_R(INS_cls, EA_4BYTE, REG_R7, REG_R2);
theEmitter->emitIns_R_R(INS_clz, EA_4BYTE, REG_R8, REG_R3);
theEmitter->emitIns_R_R(INS_rbit, EA_4BYTE, REG_R9, REG_R4);
theEmitter->emitIns_R_R(INS_rev, EA_4BYTE, REG_R10, REG_R5);
theEmitter->emitIns_R_R(INS_rev16, EA_4BYTE, REG_R11, REG_R6);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I
//
genDefineTempLabel(genCreateTempLabel());
// mov reg, imm(i16,hw)
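// imm(i16,hw): a 16-bit chunk placed at bit position 0/16/32/48, with the remaining
// bits all-zero or all-one, so it can be materialized with a single movz or movn.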
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000000001234);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000000043210000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0000567800000000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765000000000000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFFFFFF1234);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFFFFFF4321FFFF);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xFFFF5678FFFFFFFF);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x8765FFFFFFFFFFFF);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00001234);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x87650000);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xFFFF1234);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x4567FFFF);
// mov reg, imm(N,r,s)
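// imm(N,r,s): logical (bitmask) immediates, i.e. a rotated run of ones replicated
// across the register, as accepted by the orr/and/eor/tst class of instructions.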
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x6666666666666666);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_SP, 0x7FFF00007FFF0000);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x5555555555555555);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0xE003E003E003E003);
theEmitter->emitIns_R_I(INS_mov, EA_8BYTE, REG_R8, 0x0707070707070707);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x00FFFFF0);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x66666666);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x03FFC000);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x55555555);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0xE003E003);
theEmitter->emitIns_R_I(INS_mov, EA_4BYTE, REG_R8, 0x07070707);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0xE003E003E003E003);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x00FFFFF000000000);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x6666666666666666);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x0707070707070707);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x7FFF00007FFF0000);
theEmitter->emitIns_R_I(INS_tst, EA_8BYTE, REG_R8, 0x5555555555555555);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xE003E003);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x00FFFFF0);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x66666666);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x07070707);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0xFFF00000);
theEmitter->emitIns_R_I(INS_tst, EA_4BYTE, REG_R8, 0x55555555);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R
//
genDefineTempLabel(genCreateTempLabel());
// tst reg, reg
theEmitter->emitIns_R_R(INS_tst, EA_8BYTE, REG_R7, REG_R10);
// mov reg, reg
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_R8, REG_SP, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_SP, REG_R9, /* canSkip */ false);
theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_R4, REG_R12);
theEmitter->emitIns_R_R(INS_negs, EA_8BYTE, REG_R3, REG_R13);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_R_R(INS_mvn, EA_4BYTE, REG_R5, REG_R11);
theEmitter->emitIns_R_R(INS_neg, EA_4BYTE, REG_R4, REG_R12);
theEmitter->emitIns_R_R(INS_negs, EA_4BYTE, REG_R3, REG_R13);
theEmitter->emitIns_Mov(INS_sxtb, EA_8BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_sxth, EA_8BYTE, REG_R5, REG_R11, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_sxtw, EA_8BYTE, REG_R4, REG_R12, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_uxtb, EA_8BYTE, REG_R3, REG_R13, /* canSkip */ false); // map to Wt
theEmitter->emitIns_Mov(INS_uxth, EA_8BYTE, REG_R2, REG_R14, /* canSkip */ false); // map to Wt
theEmitter->emitIns_Mov(INS_sxtb, EA_4BYTE, REG_R7, REG_R10, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_sxth, EA_4BYTE, REG_R5, REG_R11, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_uxtb, EA_4BYTE, REG_R3, REG_R13, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_uxth, EA_4BYTE, REG_R2, REG_R14, /* canSkip */ false);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I_I
//
genDefineTempLabel(genCreateTempLabel());
// mov reg, imm(i16,hw)
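// Explicit hw forms: the second immediate is the left shift (0/16/32/48) applied to
// the 16-bit payload by mov/movk/movn/movz.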
theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x1234, 0, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_mov, EA_8BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movk, EA_8BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movn, EA_8BYTE, REG_R8, 0x5678, 32, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movz, EA_8BYTE, REG_R8, 0x8765, 48, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movk, EA_4BYTE, REG_R8, 0x4321, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movn, EA_4BYTE, REG_R8, 0x5678, 16, INS_OPTS_LSL);
theEmitter->emitIns_R_I_I(INS_movz, EA_4BYTE, REG_R8, 0x8765, 16, INS_OPTS_LSL);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I
//
genDefineTempLabel(genCreateTempLabel());
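// Immediate shifts, logical bitmask immediates, and add/sub with 12-bit immediates
// (optionally shifted left by 12); the negative add/sub values below are expected to
// be encoded via the complementary add/sub form.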
theEmitter->emitIns_R_R_I(INS_lsl, EA_8BYTE, REG_R0, REG_R0, 1);
theEmitter->emitIns_R_R_I(INS_lsl, EA_4BYTE, REG_R9, REG_R3, 18);
theEmitter->emitIns_R_R_I(INS_lsr, EA_8BYTE, REG_R7, REG_R0, 37);
theEmitter->emitIns_R_R_I(INS_lsr, EA_4BYTE, REG_R0, REG_R1, 2);
theEmitter->emitIns_R_R_I(INS_asr, EA_8BYTE, REG_R2, REG_R3, 53);
theEmitter->emitIns_R_R_I(INS_asr, EA_4BYTE, REG_R9, REG_R3, 18);
theEmitter->emitIns_R_R_I(INS_and, EA_8BYTE, REG_R2, REG_R3, 0x5555555555555555);
theEmitter->emitIns_R_R_I(INS_ands, EA_8BYTE, REG_R1, REG_R5, 0x6666666666666666);
theEmitter->emitIns_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, 0x0707070707070707);
theEmitter->emitIns_R_R_I(INS_orr, EA_8BYTE, REG_SP, REG_R3, 0xFFFC000000000000);
theEmitter->emitIns_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, 0xE003E003);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 31);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 32);
theEmitter->emitIns_R_R_I(INS_ror, EA_8BYTE, REG_R8, REG_R9, 63);
theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ror, EA_4BYTE, REG_R8, REG_R9, 31);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_add, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_adds, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_8BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0); // == mov
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -1);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, -0xfff);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0x1000);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfff000);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xfffffffffffff000LL);
theEmitter->emitIns_R_R_I(INS_subs, EA_4BYTE, REG_R8, REG_R9, 0xffffffffff800000LL);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I cmp/tst
//
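// Shifted-register and extended-register forms of cmp/tst; for the extended forms
// the immediate is a left shift in the range 0-4.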
// cmp
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0);
// CMP (shifted register)
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
// TST (shifted register)
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 31, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 32, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 33, INS_OPTS_ASR);
theEmitter->emitIns_R_R_I(INS_tst, EA_8BYTE, REG_R8, REG_R9, 34, INS_OPTS_ROR);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 21, INS_OPTS_LSL);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 22, INS_OPTS_LSR);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 23, INS_OPTS_ASR);
theEmitter->emitIns_R_R_I(INS_tst, EA_4BYTE, REG_R8, REG_R9, 24, INS_OPTS_ROR);
// CMP (extended register)
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTH);
// Note: for "cmp x8, x9, UXTW", msdis disassembles this as "cmp x8,x9", which looks like an msdis issue.
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTX);
// CMP 64-bit (extended register) and left shift
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 1, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 3, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_8BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTX);
// CMP 32-bit (extended register) and left shift
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 2, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_I(INS_cmp, EA_4BYTE, REG_R8, REG_R9, 4, INS_OPTS_SXTW);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R
//
genDefineTempLabel(genCreateTempLabel());
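// Three-register data-processing: variable shifts (lslv/lsrv/asrv/rorv and their
// lsl/lsr/asr/ror aliases), add/sub with carry, divides, and the multiply family
// including the widening smull/umull and high-half smulh/umulh forms.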
theEmitter->emitIns_R_R_R(INS_lsl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asr, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ror, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbc, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbcs, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_udiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sdiv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mneg, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smnegl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umnegl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umulh, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lslv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asrv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_rorv, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asr, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ror, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_adcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbc, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sbcs, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_udiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_sdiv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mul, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_mneg, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smnegl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_smulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umull, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umnegl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_umulh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lslv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_lsrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_asrv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_rorv, EA_4BYTE, REG_R8, REG_R9, REG_R10);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// ARMv8.1 LSE Atomics
//
genDefineTempLabel(genCreateTempLabel());
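// ARMv8.1 atomics: compare-and-swap (cas*), atomic read-modify-write forms that
// return the original value (ldadd*/ldclr*/ldset*), swaps (swp*), and store-only
// forms (stadd*). The b/h suffixes select byte/halfword operands and the a/l/al
// suffixes add acquire/release/acquire-release semantics.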
theEmitter->emitIns_R_R_R(INS_casb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casab, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casalb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_caslb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_cash, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casah, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casalh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_caslh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_cas, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casa, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_cas, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casa, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_casl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddab, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddalb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddlb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddah, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddalh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddlh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadd, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadda, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadd, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldadda, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldclral, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldclral, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldsetal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldsetal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_ldaddl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpab, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpalb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swplb, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swph, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpah, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpalh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swplh, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swp, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpa, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpal, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpl, EA_4BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swp, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpa, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpal, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R_R(INS_swpl, EA_8BYTE, REG_R8, REG_R9, REG_R10);
theEmitter->emitIns_R_R(INS_staddb, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddlb, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddh, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddlh, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_stadd, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddl, EA_4BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_stadd, EA_8BYTE, REG_R8, REG_R10);
theEmitter->emitIns_R_R(INS_staddl, EA_8BYTE, REG_R8, REG_R10);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I_I
//
genDefineTempLabel(genCreateTempLabel());
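// Bitfield moves: sbfm/bfm/ubfm are the base forms; sbfiz/bfi/ubfiz and
// sbfx/bfxil/ubfx are the insert and extract aliases of the same encodings.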
theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_8BYTE, REG_R2, REG_R3, 4, 39);
theEmitter->emitIns_R_R_I_I(INS_bfm, EA_8BYTE, REG_R1, REG_R5, 20, 23);
theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_8BYTE, REG_R8, REG_R9, 36, 7);
theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_8BYTE, REG_R2, REG_R3, 7, 37);
theEmitter->emitIns_R_R_I_I(INS_bfi, EA_8BYTE, REG_R1, REG_R5, 23, 21);
theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_8BYTE, REG_R8, REG_R9, 39, 5);
theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_8BYTE, REG_R2, REG_R3, 10, 24);
theEmitter->emitIns_R_R_I_I(INS_bfxil, EA_8BYTE, REG_R1, REG_R5, 26, 16);
theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_8BYTE, REG_R8, REG_R9, 42, 8);
theEmitter->emitIns_R_R_I_I(INS_sbfm, EA_4BYTE, REG_R2, REG_R3, 4, 19);
theEmitter->emitIns_R_R_I_I(INS_bfm, EA_4BYTE, REG_R1, REG_R5, 10, 13);
theEmitter->emitIns_R_R_I_I(INS_ubfm, EA_4BYTE, REG_R8, REG_R9, 16, 7);
theEmitter->emitIns_R_R_I_I(INS_sbfiz, EA_4BYTE, REG_R2, REG_R3, 5, 17);
theEmitter->emitIns_R_R_I_I(INS_bfi, EA_4BYTE, REG_R1, REG_R5, 13, 11);
theEmitter->emitIns_R_R_I_I(INS_ubfiz, EA_4BYTE, REG_R8, REG_R9, 19, 5);
theEmitter->emitIns_R_R_I_I(INS_sbfx, EA_4BYTE, REG_R2, REG_R3, 3, 14);
theEmitter->emitIns_R_R_I_I(INS_bfxil, EA_4BYTE, REG_R1, REG_R5, 11, 9);
theEmitter->emitIns_R_R_I_I(INS_ubfx, EA_4BYTE, REG_R8, REG_R9, 22, 8);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_I
//
genDefineTempLabel(genCreateTempLabel());
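// add/sub with an extended register (extend option plus a 0-4 left shift) or a
// shifted register, the shifted-register logical ops, and extr, which extracts a
// field spanning the Rn:Rm register pair starting at the given lsb.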
// ADD (extended register)
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
// ADD (extended register) and left shift
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
// ADD (shifted register)
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 31, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 32, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_add, EA_8BYTE, REG_R8, REG_R9, REG_R10, 33, INS_OPTS_ASR);
// EXTR (extract field from register pair)
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 1);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 31);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 32);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 63);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 1);
theEmitter->emitIns_R_R_R_I(INS_extr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 31);
// SUB (extended register)
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0, INS_OPTS_SXTX);
// SUB (extended register) and left shift
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTB);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTH);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_SXTX);
// SUB (shifted register)
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 27, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 28, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_sub, EA_4BYTE, REG_R8, REG_R9, REG_R10, 29, INS_OPTS_ASR);
// bit operations
theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_and, EA_8BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_8BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_8BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_8BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_8BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_8BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_8BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_and, EA_4BYTE, REG_R8, REG_R9, REG_R10, 1, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_ands, EA_4BYTE, REG_R8, REG_R9, REG_R10, 2, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eor, EA_4BYTE, REG_R8, REG_R9, REG_R10, 3, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orr, EA_4BYTE, REG_R8, REG_R9, REG_R10, 4, INS_OPTS_ROR);
theEmitter->emitIns_R_R_R_I(INS_bic, EA_4BYTE, REG_R8, REG_R9, REG_R10, 5, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_I(INS_bics, EA_4BYTE, REG_R8, REG_R9, REG_R10, 6, INS_OPTS_LSR);
theEmitter->emitIns_R_R_R_I(INS_eon, EA_4BYTE, REG_R8, REG_R9, REG_R10, 7, INS_OPTS_ASR);
theEmitter->emitIns_R_R_R_I(INS_orn, EA_4BYTE, REG_R8, REG_R9, REG_R10, 8, INS_OPTS_ROR);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_I -- load/store pair
//
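// ldp/stp load or store a register pair; ldnp/stnp are the non-temporal variants and
// ldpsw sign-extends two words. The offset must be a multiple of the access size, and
// the pre/post-index forms write the updated address back to the base register.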
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 8);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldpsw, EA_4BYTE, REG_R8, REG_R9, REG_R10, 16, INS_OPTS_PRE_INDEX);
// SP and ZR tests
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_R1, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_R0, REG_ZR, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_SP, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_ZR, REG_ZR, REG_R8, 16, INS_OPTS_PRE_INDEX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_Ext -- load/store shifted/extend
//
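// The index register may be extended (SXTW/UXTW from 32 bits, LSL/SXTX/UXTX for 64 bits)
// and optionally shifted left by log2 of the access size before being added to the base.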
genDefineTempLabel(genCreateTempLabel());
// LDR (register)
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsw, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsh, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldrsb, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
// STR (register)
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_8BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_str, EA_4BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_strh, EA_2BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_strb, EA_1BYTE, REG_R8, REG_SP, REG_R9, INS_OPTS_UXTX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_R
//
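// madd: Rd = Ra + Rn * Rm; msub: Rd = Ra - Rn * Rm. The smaddl/smsubl and umaddl/umsubl
// forms multiply two 32-bit sources (signed or unsigned) into a 64-bit product before the
// 64-bit accumulate.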
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R_R_R_R(INS_madd, EA_4BYTE, REG_R0, REG_R12, REG_R27, REG_R10);
theEmitter->emitIns_R_R_R_R(INS_msub, EA_4BYTE, REG_R1, REG_R13, REG_R28, REG_R11);
theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_4BYTE, REG_R2, REG_R14, REG_R0, REG_R12);
theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_4BYTE, REG_R3, REG_R15, REG_R1, REG_R13);
theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_4BYTE, REG_R4, REG_R19, REG_R2, REG_R14);
theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_4BYTE, REG_R5, REG_R20, REG_R3, REG_R15);
theEmitter->emitIns_R_R_R_R(INS_madd, EA_8BYTE, REG_R6, REG_R21, REG_R4, REG_R19);
theEmitter->emitIns_R_R_R_R(INS_msub, EA_8BYTE, REG_R7, REG_R22, REG_R5, REG_R20);
theEmitter->emitIns_R_R_R_R(INS_smaddl, EA_8BYTE, REG_R8, REG_R23, REG_R6, REG_R21);
theEmitter->emitIns_R_R_R_R(INS_smsubl, EA_8BYTE, REG_R9, REG_R24, REG_R7, REG_R22);
theEmitter->emitIns_R_R_R_R(INS_umaddl, EA_8BYTE, REG_R10, REG_R25, REG_R8, REG_R23);
theEmitter->emitIns_R_R_R_R(INS_umsubl, EA_8BYTE, REG_R11, REG_R26, REG_R9, REG_R24);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// R_COND
//
// cset reg, cond
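// cset: Rd = 1 if the condition holds, otherwise 0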
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R9, INS_COND_EQ); // eq
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R8, INS_COND_NE); // ne
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R7, INS_COND_HS); // hs
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R6, INS_COND_LO); // lo
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R5, INS_COND_MI); // mi
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R4, INS_COND_PL); // pl
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R3, INS_COND_VS); // vs
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R2, INS_COND_VC); // vc
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R1, INS_COND_HI); // hi
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R0, INS_COND_LS); // ls
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R9, INS_COND_GE); // ge
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R8, INS_COND_LT); // lt
theEmitter->emitIns_R_COND(INS_cset, EA_8BYTE, REG_R7, INS_COND_GT); // gt
theEmitter->emitIns_R_COND(INS_cset, EA_4BYTE, REG_R6, INS_COND_LE); // le
// csetm reg, cond
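// csetm: Rd = all ones (-1) if the condition holds, otherwise 0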
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R9, INS_COND_EQ); // eq
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R8, INS_COND_NE); // ne
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R7, INS_COND_HS); // hs
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R6, INS_COND_LO); // lo
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R5, INS_COND_MI); // mi
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R4, INS_COND_PL); // pl
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R3, INS_COND_VS); // vs
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R2, INS_COND_VC); // vc
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R1, INS_COND_HI); // hi
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R0, INS_COND_LS); // ls
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R9, INS_COND_GE); // ge
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R8, INS_COND_LT); // lt
theEmitter->emitIns_R_COND(INS_csetm, EA_4BYTE, REG_R7, INS_COND_GT); // gt
theEmitter->emitIns_R_COND(INS_csetm, EA_8BYTE, REG_R6, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// R_R_COND
//
// cinc reg, reg, cond
// cinv reg, reg, cond
// cneg reg, reg, cond
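// cinc: Rd = cond ? Rn + 1 : Rn; cinv: Rd = cond ? ~Rn : Rn; cneg: Rd = cond ? -Rn : Rn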
theEmitter->emitIns_R_R_COND(INS_cinc, EA_8BYTE, REG_R0, REG_R4, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R1, REG_R5, INS_COND_NE); // ne
theEmitter->emitIns_R_R_COND(INS_cneg, EA_4BYTE, REG_R2, REG_R6, INS_COND_HS); // hs
theEmitter->emitIns_R_R_COND(INS_cinc, EA_8BYTE, REG_R3, REG_R7, INS_COND_LO); // lo
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R4, REG_R8, INS_COND_MI); // mi
theEmitter->emitIns_R_R_COND(INS_cneg, EA_8BYTE, REG_R5, REG_R9, INS_COND_PL); // pl
theEmitter->emitIns_R_R_COND(INS_cinc, EA_8BYTE, REG_R6, REG_R0, INS_COND_VS); // vs
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R7, REG_R1, INS_COND_VC); // vc
theEmitter->emitIns_R_R_COND(INS_cneg, EA_8BYTE, REG_R8, REG_R2, INS_COND_HI); // hi
theEmitter->emitIns_R_R_COND(INS_cinc, EA_4BYTE, REG_R9, REG_R3, INS_COND_LS); // ls
theEmitter->emitIns_R_R_COND(INS_cinv, EA_4BYTE, REG_R0, REG_R4, INS_COND_GE); // ge
theEmitter->emitIns_R_R_COND(INS_cneg, EA_8BYTE, REG_R2, REG_R5, INS_COND_LT); // lt
theEmitter->emitIns_R_R_COND(INS_cinc, EA_4BYTE, REG_R2, REG_R6, INS_COND_GT); // gt
theEmitter->emitIns_R_R_COND(INS_cinv, EA_8BYTE, REG_R3, REG_R7, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// R_R_R_COND
//
// csel reg, reg, reg, cond
// csinc reg, reg, reg, cond
// csinv reg, reg, reg, cond
// csneg reg, reg, reg, cond
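// csel: Rd = cond ? Rn : Rm; csinc: Rd = cond ? Rn : Rm + 1;
// csinv: Rd = cond ? Rn : ~Rm; csneg: Rd = cond ? Rn : -Rm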
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R0, REG_R4, REG_R8, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_4BYTE, REG_R1, REG_R5, REG_R9, INS_COND_NE); // ne
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_4BYTE, REG_R2, REG_R6, REG_R0, INS_COND_HS); // hs
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_8BYTE, REG_R3, REG_R7, REG_R1, INS_COND_LO); // lo
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R4, REG_R8, REG_R2, INS_COND_MI); // mi
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, REG_R5, REG_R9, REG_R3, INS_COND_PL); // pl
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_8BYTE, REG_R6, REG_R0, REG_R4, INS_COND_VS); // vs
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_4BYTE, REG_R7, REG_R1, REG_R5, INS_COND_VC); // vc
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_8BYTE, REG_R8, REG_R2, REG_R6, INS_COND_HI); // hi
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_4BYTE, REG_R9, REG_R3, REG_R7, INS_COND_LS); // ls
theEmitter->emitIns_R_R_R_COND(INS_csinv, EA_4BYTE, REG_R0, REG_R4, REG_R8, INS_COND_GE); // ge
theEmitter->emitIns_R_R_R_COND(INS_csneg, EA_8BYTE, REG_R2, REG_R5, REG_R9, INS_COND_LT); // lt
theEmitter->emitIns_R_R_R_COND(INS_csel, EA_4BYTE, REG_R2, REG_R6, REG_R0, INS_COND_GT); // gt
theEmitter->emitIns_R_R_R_COND(INS_csinc, EA_8BYTE, REG_R3, REG_R7, REG_R1, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// R_R_FLAGS_COND
//
// ccmp reg1, reg2, nzcv, cond
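// If the condition holds, ccmp sets NZCV from comparing the first operand with the second;
// otherwise it sets NZCV directly to the 4-bit flags immediate.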
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, REG_R1, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, REG_R2, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmp reg1, imm, nzcv, cond
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, 13, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmp reg1, imm, nzcv, cond -- encoded as ccmn
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R9, -3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R8, -2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R7, -1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R6, -5, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R5, -31, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R4, -28, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R3, -25, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R2, -22, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R1, -19, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R0, -16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R9, -13, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R8, -10, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_8BYTE, REG_R7, -7, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmp, EA_4BYTE, REG_R6, -4, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmn reg1, reg2, nzcv, cond
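// ccmn is the compare-negative form: the conditional comparison adds the second operand
// to the first instead of subtracting it.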
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, REG_R3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, REG_R2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, REG_R1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, REG_R0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, REG_R3, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, REG_R2, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, REG_R1, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, REG_R0, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, REG_R3, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, REG_R2, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R9, REG_R1, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, REG_R0, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, REG_R3, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_R_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R6, REG_R2, INS_FLAGS_NZCV, INS_COND_LE); // le
// ccmn reg1, imm, nzcv, cond
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R9, 3, INS_FLAGS_V, INS_COND_EQ); // eq
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R8, 2, INS_FLAGS_C, INS_COND_NE); // ne
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R7, 1, INS_FLAGS_Z, INS_COND_HS); // hs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R6, 0, INS_FLAGS_N, INS_COND_LO); // lo
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R5, 31, INS_FLAGS_CV, INS_COND_MI); // mi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R4, 28, INS_FLAGS_ZV, INS_COND_PL); // pl
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R3, 25, INS_FLAGS_ZC, INS_COND_VS); // vs
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R2, 22, INS_FLAGS_NV, INS_COND_VC); // vc
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R1, 19, INS_FLAGS_NC, INS_COND_HI); // hi
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R0, 16, INS_FLAGS_NZ, INS_COND_LS); // ls
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R9, 13, INS_FLAGS_NONE, INS_COND_GE); // ge
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R8, 10, INS_FLAGS_NZV, INS_COND_LT); // lt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_8BYTE, REG_R7, 7, INS_FLAGS_NZC, INS_COND_GT); // gt
theEmitter->emitIns_R_I_FLAGS_COND(INS_ccmn, EA_4BYTE, REG_R6, 4, INS_FLAGS_NZCV, INS_COND_LE); // le
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Branch to register
//
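// br jumps to the address in the register; ret does the same but hints a subroutine
// return to the branch predictor.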
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R(INS_br, EA_PTRSIZE, REG_R8);
theEmitter->emitIns_R(INS_ret, EA_PTRSIZE, REG_R8);
theEmitter->emitIns_R(INS_ret, EA_PTRSIZE, REG_LR);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// Misc
//
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_I(INS_brk, EA_PTRSIZE, 0);
theEmitter->emitIns_I(INS_brk, EA_PTRSIZE, 65535);
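// Barriers: dmb = data memory barrier, dsb = data synchronization barrier,
// isb = instruction synchronization barrier; the barrier option selects the
// shareability domain and the access types (loads, stores, or both) it orders.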
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_OSHLD);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_OSHST);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_OSH);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_NSHLD);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_NSHST);
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_NSH);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_ISHLD);
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_ISHST);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_ISH);
theEmitter->emitIns_BARR(INS_dsb, INS_BARRIER_LD);
theEmitter->emitIns_BARR(INS_dmb, INS_BARRIER_ST);
theEmitter->emitIns_BARR(INS_isb, INS_BARRIER_SY);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
////////////////////////////////////////////////////////////////////////////////
//
// SIMD and Floating point
//
////////////////////////////////////////////////////////////////////////////////
//
// Load/Stores vector register
//
genDefineTempLabel(genCreateTempLabel());
// ldr/str Vt, [reg]
theEmitter->emitIns_R_R(INS_ldr, EA_8BYTE, REG_V1, REG_R9);
theEmitter->emitIns_R_R(INS_str, EA_8BYTE, REG_V2, REG_R8);
theEmitter->emitIns_R_R(INS_ldr, EA_4BYTE, REG_V3, REG_R7);
theEmitter->emitIns_R_R(INS_str, EA_4BYTE, REG_V4, REG_R6);
theEmitter->emitIns_R_R(INS_ldr, EA_2BYTE, REG_V5, REG_R5);
theEmitter->emitIns_R_R(INS_str, EA_2BYTE, REG_V6, REG_R4);
theEmitter->emitIns_R_R(INS_ldr, EA_1BYTE, REG_V7, REG_R3);
theEmitter->emitIns_R_R(INS_str, EA_1BYTE, REG_V8, REG_R2);
theEmitter->emitIns_R_R(INS_ldr, EA_16BYTE, REG_V9, REG_R1);
theEmitter->emitIns_R_R(INS_str, EA_16BYTE, REG_V10, REG_R0);
// ldr/str Vt, [reg+cns] -- scaled
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 2);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 4);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 8);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 16);
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V7, REG_R10, 1);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V7, REG_R10, 2);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V7, REG_R10, 4);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V7, REG_R10, 8);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V7, REG_R10, 16);
// ldr/str Vt, [reg],cns -- post-indexed (unscaled)
// ldr/str Vt, [reg+cns]! -- pre-indexed (unscaled)
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldr, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_1BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_2BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_4BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_8BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_str, EA_16BYTE, REG_V8, REG_R9, 1, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_I(INS_ldur, EA_1BYTE, REG_V8, REG_R9, 2);
theEmitter->emitIns_R_R_I(INS_ldur, EA_2BYTE, REG_V8, REG_R9, 3);
theEmitter->emitIns_R_R_I(INS_ldur, EA_4BYTE, REG_V8, REG_R9, 5);
theEmitter->emitIns_R_R_I(INS_ldur, EA_8BYTE, REG_V8, REG_R9, 9);
theEmitter->emitIns_R_R_I(INS_ldur, EA_16BYTE, REG_V8, REG_R9, 17);
theEmitter->emitIns_R_R_I(INS_stur, EA_1BYTE, REG_V7, REG_R10, 2);
theEmitter->emitIns_R_R_I(INS_stur, EA_2BYTE, REG_V7, REG_R10, 3);
theEmitter->emitIns_R_R_I(INS_stur, EA_4BYTE, REG_V7, REG_R10, 5);
theEmitter->emitIns_R_R_I(INS_stur, EA_8BYTE, REG_V7, REG_R10, 9);
theEmitter->emitIns_R_R_I(INS_stur, EA_16BYTE, REG_V7, REG_R10, 17);
// load/store pair
theEmitter->emitIns_R_R_R(INS_ldnp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V1, REG_V2, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_8BYTE, REG_V2, REG_V3, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 24);
theEmitter->emitIns_R_R_R(INS_ldnp, EA_4BYTE, REG_V4, REG_V5, REG_SP);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 4);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_4BYTE, REG_V7, REG_V8, REG_SP, 12);
theEmitter->emitIns_R_R_R(INS_ldnp, EA_16BYTE, REG_V8, REG_V9, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V9, REG_V10, REG_R10, 0);
theEmitter->emitIns_R_R_R_I(INS_ldnp, EA_16BYTE, REG_V10, REG_V11, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_stnp, EA_16BYTE, REG_V11, REG_V12, REG_R10, 48);
theEmitter->emitIns_R_R_R(INS_ldp, EA_8BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V1, REG_V2, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V2, REG_V3, REG_SP, 8);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V3, REG_V4, REG_R10, 16);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V4, REG_V5, REG_R10, 24, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V5, REG_V6, REG_SP, 32, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, REG_V6, REG_V7, REG_SP, 40, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_8BYTE, REG_V7, REG_V8, REG_R10, 48, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R(INS_ldp, EA_4BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V1, REG_V2, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V2, REG_V3, REG_SP, 4);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V3, REG_V4, REG_R10, 8);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V4, REG_V5, REG_R10, 12, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V5, REG_V6, REG_SP, 16, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_4BYTE, REG_V6, REG_V7, REG_SP, 20, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_4BYTE, REG_V7, REG_V8, REG_R10, 24, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R(INS_ldp, EA_16BYTE, REG_V0, REG_V1, REG_R10);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V1, REG_V2, REG_SP, 0);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V2, REG_V3, REG_SP, 16);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V3, REG_V4, REG_R10, 32);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V4, REG_V5, REG_R10, 48, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V5, REG_V6, REG_SP, 64, INS_OPTS_POST_INDEX);
theEmitter->emitIns_R_R_R_I(INS_ldp, EA_16BYTE, REG_V6, REG_V7, REG_SP, 80, INS_OPTS_PRE_INDEX);
theEmitter->emitIns_R_R_R_I(INS_stp, EA_16BYTE, REG_V7, REG_V8, REG_R10, 96, INS_OPTS_PRE_INDEX);
// LDR (register)
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_8BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 3);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_4BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 2);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_16BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 4);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V1, REG_SP, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V2, REG_R7, REG_R9, INS_OPTS_LSL);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_LSL, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V4, REG_R7, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_SXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V6, REG_SP, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V7, REG_R7, REG_R9, INS_OPTS_UXTW, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V8, REG_R7, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V9, REG_R7, REG_R9, INS_OPTS_SXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V10, REG_R7, REG_R9, INS_OPTS_UXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_2BYTE, REG_V11, REG_SP, REG_R9, INS_OPTS_UXTX, 1);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V1, REG_R7, REG_R9);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V2, REG_SP, REG_R9, INS_OPTS_SXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V3, REG_R7, REG_R9, INS_OPTS_UXTW);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V4, REG_SP, REG_R9, INS_OPTS_SXTX);
theEmitter->emitIns_R_R_R_Ext(INS_ldr, EA_1BYTE, REG_V5, REG_R7, REG_R9, INS_OPTS_UXTX);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R mov and aliases for mov
//
// mov vector to vector
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_V0, REG_V1, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_16BYTE, REG_V2, REG_V3, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_V12, REG_V13, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_2BYTE, REG_V14, REG_V15, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_1BYTE, REG_V16, REG_V17, /* canSkip */ false);
// mov vector to general
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_R0, REG_V4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_R1, REG_V5, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_2BYTE, REG_R2, REG_V6, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_1BYTE, REG_R3, REG_V7, /* canSkip */ false);
// mov general to vector
theEmitter->emitIns_Mov(INS_mov, EA_8BYTE, REG_V8, REG_R4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_4BYTE, REG_V9, REG_R5, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_2BYTE, REG_V10, REG_R6, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_mov, EA_1BYTE, REG_V11, REG_R7, /* canSkip */ false);
// mov vector[index] to vector
theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V2, REG_V3, 3);
theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V4, REG_V5, 7);
theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V6, REG_V7, 15);
// mov to general from vector[index]
theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_R8, REG_V16, 1);
theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_R9, REG_V17, 2);
theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_R10, REG_V18, 3);
theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_R11, REG_V19, 4);
// mov to vector[index] from general
theEmitter->emitIns_R_R_I(INS_mov, EA_8BYTE, REG_V20, REG_R12, 1);
theEmitter->emitIns_R_R_I(INS_mov, EA_4BYTE, REG_V21, REG_R13, 2);
theEmitter->emitIns_R_R_I(INS_mov, EA_2BYTE, REG_V22, REG_R14, 6);
theEmitter->emitIns_R_R_I(INS_mov, EA_1BYTE, REG_V23, REG_R15, 8);
// mov vector[index] to vector[index2]
theEmitter->emitIns_R_R_I_I(INS_mov, EA_8BYTE, REG_V8, REG_V9, 1, 0);
theEmitter->emitIns_R_R_I_I(INS_mov, EA_4BYTE, REG_V10, REG_V11, 2, 1);
theEmitter->emitIns_R_R_I_I(INS_mov, EA_2BYTE, REG_V12, REG_V13, 5, 2);
theEmitter->emitIns_R_R_I_I(INS_mov, EA_1BYTE, REG_V14, REG_V15, 12, 3);
//////////////////////////////////////////////////////////////////////////////////
// mov/dup scalar
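// dup (scalar form): copies the vector element at the given index into the destination
// scalar SIMD register.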
theEmitter->emitIns_R_R_I(INS_dup, EA_8BYTE, REG_V24, REG_V25, 1);
theEmitter->emitIns_R_R_I(INS_dup, EA_4BYTE, REG_V26, REG_V27, 3);
theEmitter->emitIns_R_R_I(INS_dup, EA_2BYTE, REG_V28, REG_V29, 7);
theEmitter->emitIns_R_R_I(INS_dup, EA_1BYTE, REG_V30, REG_V31, 15);
// mov/ins vector element
theEmitter->emitIns_R_R_I_I(INS_ins, EA_8BYTE, REG_V0, REG_V1, 0, 1);
theEmitter->emitIns_R_R_I_I(INS_ins, EA_4BYTE, REG_V2, REG_V3, 2, 2);
theEmitter->emitIns_R_R_I_I(INS_ins, EA_2BYTE, REG_V4, REG_V5, 4, 3);
theEmitter->emitIns_R_R_I_I(INS_ins, EA_1BYTE, REG_V6, REG_V7, 8, 4);
// umov to general from vector element
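// umov zero-extends the selected vector element into the general register;
// smov (below) sign-extends it.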
theEmitter->emitIns_R_R_I(INS_umov, EA_8BYTE, REG_R0, REG_V8, 1);
theEmitter->emitIns_R_R_I(INS_umov, EA_4BYTE, REG_R1, REG_V9, 2);
theEmitter->emitIns_R_R_I(INS_umov, EA_2BYTE, REG_R2, REG_V10, 4);
theEmitter->emitIns_R_R_I(INS_umov, EA_1BYTE, REG_R3, REG_V11, 8);
// ins to vector element from general
theEmitter->emitIns_R_R_I(INS_ins, EA_8BYTE, REG_V12, REG_R4, 1);
theEmitter->emitIns_R_R_I(INS_ins, EA_4BYTE, REG_V13, REG_R5, 3);
theEmitter->emitIns_R_R_I(INS_ins, EA_2BYTE, REG_V14, REG_R6, 7);
theEmitter->emitIns_R_R_I(INS_ins, EA_1BYTE, REG_V15, REG_R7, 15);
// smov to general from vector element
theEmitter->emitIns_R_R_I(INS_smov, EA_4BYTE, REG_R5, REG_V17, 2);
theEmitter->emitIns_R_R_I(INS_smov, EA_2BYTE, REG_R6, REG_V18, 4);
theEmitter->emitIns_R_R_I(INS_smov, EA_1BYTE, REG_R7, REG_V19, 8);
// ext extract vector from pair of vectors
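// ext concatenates the two source vectors (first source in the low half) and extracts a
// full-width result starting at the given byte index.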
theEmitter->emitIns_R_R_R_I(INS_ext, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_8B);
theEmitter->emitIns_R_R_R_I(INS_ext, EA_8BYTE, REG_V4, REG_V5, REG_V6, 7, INS_OPTS_8B);
theEmitter->emitIns_R_R_R_I(INS_ext, EA_16BYTE, REG_V8, REG_V9, REG_V10, 11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R_I(INS_ext, EA_16BYTE, REG_V12, REG_V13, REG_V14, 15, INS_OPTS_16B);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I movi and mvni
//
// movi imm8 (vector)
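// movi replicates an 8-bit immediate, optionally shifted (LSL) or ones-extended (MSL),
// into every lane; mvni writes the bitwise inverse of the shifted immediate.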
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V0, 0x00, INS_OPTS_8B);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V1, 0xFF, INS_OPTS_8B);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V2, 0x00, INS_OPTS_16B);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V3, 0xFF, INS_OPTS_16B);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V4, 0x007F, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V5, 0x7F00, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V6, 0x003F, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V7, 0x3F00, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V8, 0x1F, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V9, 0x1F00, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V10, 0x1F0000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V11, 0x1F000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V12, 0x1FFF, INS_OPTS_2S); // MSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V13, 0x1FFFFF, INS_OPTS_2S); // MSL 16
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V14, 0x37, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V15, 0x3700, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V16, 0x370000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V17, 0x37000000, INS_OPTS_4S); // LSL 24
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V18, 0x37FF, INS_OPTS_4S); // MSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V19, 0x37FFFF, INS_OPTS_4S); // MSL 16
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V20, 0xFF80, INS_OPTS_4H); // mvni
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V21, 0xFFC0, INS_OPTS_8H); // mvni
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V22, 0xFFFFFFE0, INS_OPTS_2S); // mvni
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V23, 0xFFFFF0FF, INS_OPTS_4S); // mvni LSL 8
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V24, 0xFFF8FFFF, INS_OPTS_2S); // mvni LSL 16
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V25, 0xFCFFFFFF, INS_OPTS_4S); // mvni LSL 24
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V26, 0xFFFFFE00, INS_OPTS_2S); // mvni MSL 8
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V27, 0xFFFC0000, INS_OPTS_4S); // mvni MSL 16
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V28, 0x00FF00FF00FF00FF, INS_OPTS_1D);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V29, 0x00FFFF0000FFFF00, INS_OPTS_2D);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V30, 0xFF000000FF000000);
theEmitter->emitIns_R_I(INS_movi, EA_16BYTE, REG_V31, 0x0, INS_OPTS_2D);
// Regression coverage: movi immediates equal to int.MaxValue or int.MaxValue / 2 were previously not encoded.
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V16, 0x7fffffff, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_movi, EA_8BYTE, REG_V16, 0x3fffffff, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V8, 0x42FF, INS_OPTS_2S); // MSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_8BYTE, REG_V9, 0x42FFFF, INS_OPTS_2S); // MSL 16
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V14, 0x5DFF, INS_OPTS_4S); // MSL 8
theEmitter->emitIns_R_I(INS_mvni, EA_16BYTE, REG_V15, 0x5DFFFF, INS_OPTS_4S); // MSL 16
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_I orr/bic vector immediate
//
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_orr, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_orr, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V0, 0x0022, INS_OPTS_4H);
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V1, 0x2200, INS_OPTS_4H); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V2, 0x0033, INS_OPTS_8H);
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V3, 0x3300, INS_OPTS_8H); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V4, 0x42, INS_OPTS_2S);
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V5, 0x4200, INS_OPTS_2S); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V6, 0x420000, INS_OPTS_2S); // LSL 16
theEmitter->emitIns_R_I(INS_bic, EA_8BYTE, REG_V7, 0x42000000, INS_OPTS_2S); // LSL 24
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V10, 0x5D, INS_OPTS_4S);
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V11, 0x5D00, INS_OPTS_4S); // LSL 8
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V12, 0x5D0000, INS_OPTS_4S); // LSL 16
theEmitter->emitIns_R_I(INS_bic, EA_16BYTE, REG_V13, 0x5D000000, INS_OPTS_4S); // LSL 24
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_F cmp/fmov immediate
//
// fmov imm8 (scalar)
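// Only constants of the form +/- (16..31)/16 * 2^e with e in [-3, 4] are encodable in the
// 8-bit fmov immediate, i.e. magnitudes from 0.125 up to 31.0.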
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V14, 1.0);
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V15, -1.0);
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V0, 2.0); // encodes imm8 == 0
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V16, 10.0);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V17, -10.0);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V18, 31); // Largest encodable value
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V19, -31);
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V20, 1.25);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V21, -1.25);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V22, 0.125); // Smallest encodable value
theEmitter->emitIns_R_F(INS_fmov, EA_4BYTE, REG_V23, -0.125);
// fmov imm8 (vector)
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V0, 2.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V24, 1.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V25, 1.0, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V26, 1.0, INS_OPTS_2D);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V27, -10.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V28, -10.0, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V29, -10.0, INS_OPTS_2D);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V30, 31.0, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V31, 31.0, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V0, 31.0, INS_OPTS_2D);
theEmitter->emitIns_R_F(INS_fmov, EA_8BYTE, REG_V1, -0.125, INS_OPTS_2S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V2, -0.125, INS_OPTS_4S);
theEmitter->emitIns_R_F(INS_fmov, EA_16BYTE, REG_V3, -0.125, INS_OPTS_2D);
// fcmp with 0.0
theEmitter->emitIns_R_F(INS_fcmp, EA_8BYTE, REG_V12, 0.0);
theEmitter->emitIns_R_F(INS_fcmp, EA_4BYTE, REG_V13, 0.0);
theEmitter->emitIns_R_F(INS_fcmpe, EA_8BYTE, REG_V14, 0.0);
theEmitter->emitIns_R_F(INS_fcmpe, EA_4BYTE, REG_V15, 0.0);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R cmeq/fmov/fcmp/fcvt
//
// cmeq scalar
theEmitter->emitIns_R_R(INS_cmeq, EA_8BYTE, REG_V0, REG_V1);
// fmov vector to vector
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_V0, REG_V2, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_V1, REG_V3, /* canSkip */ false);
// fmov vector to general
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_R0, REG_V4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_R1, REG_V5, /* canSkip */ false);
// using the optional conversion specifier
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_R2, REG_V6, /* canSkip */ false, INS_OPTS_D_TO_8BYTE);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_R3, REG_V7, /* canSkip */ false, INS_OPTS_S_TO_4BYTE);
// fmov general to vector
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_V8, REG_R4, /* canSkip */ false);
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_V9, REG_R5, /* canSkip */ false);
// using the optional conversion specifier
theEmitter->emitIns_Mov(INS_fmov, EA_4BYTE, REG_V11, REG_R7, /* canSkip */ false, INS_OPTS_4BYTE_TO_S);
theEmitter->emitIns_Mov(INS_fmov, EA_8BYTE, REG_V10, REG_R6, /* canSkip */ false, INS_OPTS_8BYTE_TO_D);
// fcmp/fcmpe
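// fcmpe raises an Invalid Operation exception for any NaN operand (including quiet NaNs);
// fcmp does so only for signaling NaNs.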
theEmitter->emitIns_R_R(INS_fcmp, EA_8BYTE, REG_V8, REG_V16);
theEmitter->emitIns_R_R(INS_fcmp, EA_4BYTE, REG_V9, REG_V17);
theEmitter->emitIns_R_R(INS_fcmpe, EA_8BYTE, REG_V10, REG_V18);
theEmitter->emitIns_R_R(INS_fcmpe, EA_4BYTE, REG_V11, REG_V19);
// fcvt
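// fcvt converts between half, single and double precision; the insOpts value selects the
// source and destination formats.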
theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V24, REG_V25, INS_OPTS_S_TO_D); // Single to Double
theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V26, REG_V27, INS_OPTS_D_TO_S); // Double to Single
theEmitter->emitIns_R_R(INS_fcvt, EA_4BYTE, REG_V1, REG_V2, INS_OPTS_H_TO_S);
theEmitter->emitIns_R_R(INS_fcvt, EA_8BYTE, REG_V3, REG_V4, INS_OPTS_H_TO_D);
theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V5, REG_V6, INS_OPTS_S_TO_H);
theEmitter->emitIns_R_R(INS_fcvt, EA_2BYTE, REG_V7, REG_V8, INS_OPTS_D_TO_H);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R floating point conversions
//
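// The fcvt<r><s|u> family converts floating point to integer: <r> selects the rounding mode
// (a = nearest, ties away; m = toward minus infinity; n = nearest, ties even;
// p = toward plus infinity; z = toward zero) and s/u selects signed or unsigned.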
// fcvtas scalar
theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V2, REG_V3);
// fcvtas scalar to general
theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtas, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtas vector
theEmitter->emitIns_R_R(INS_fcvtas, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtas, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtas, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtau scalar
theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V2, REG_V3);
// fcvtau scalar to general
theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtau, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtau vector
theEmitter->emitIns_R_R(INS_fcvtau, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtau, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtau, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtms scalar
theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V2, REG_V3);
// fcvtms scalar to general
theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtms, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtms vector
theEmitter->emitIns_R_R(INS_fcvtms, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtms, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtms, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtmu scalar
theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V2, REG_V3);
// fcvtmu scalar to general
theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtmu vector
theEmitter->emitIns_R_R(INS_fcvtmu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtmu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtns scalar
theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V2, REG_V3);
// fcvtns scalar to general
theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtns, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtns vector
theEmitter->emitIns_R_R(INS_fcvtns, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtns, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtns, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtnu scalar
theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V2, REG_V3);
// fcvtnu scalar to general
theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtnu vector
theEmitter->emitIns_R_R(INS_fcvtnu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtnu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtps scalar
theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V2, REG_V3);
// fcvtps scalar to general
theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtps, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtps vector
theEmitter->emitIns_R_R(INS_fcvtps, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtps, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtps, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtpu scalar
theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V2, REG_V3);
// fcvtpu scalar to general
theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtpu vector
theEmitter->emitIns_R_R(INS_fcvtpu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtpu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// fcvtzs scalar
theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V2, REG_V3);
// fcvtzs scalar to general
theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtzs vector
theEmitter->emitIns_R_R(INS_fcvtzs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtzs, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// fcvtzu scalar
theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V2, REG_V3);
// fcvtzu scalar to general
theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R0, REG_V4, INS_OPTS_S_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_4BYTE, REG_R1, REG_V5, INS_OPTS_D_TO_4BYTE);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R2, REG_V6, INS_OPTS_S_TO_8BYTE);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_R3, REG_V7, INS_OPTS_D_TO_8BYTE);
// fcvtzu vector
theEmitter->emitIns_R_R(INS_fcvtzu, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fcvtzu, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
////////////////////////////////////////////////////////////////////////////////
// scvtf scalar
theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V2, REG_V3);
// scvtf scalar from general
theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
theEmitter->emitIns_R_R(INS_scvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
// scvtf vector
theEmitter->emitIns_R_R(INS_scvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_scvtf, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_scvtf, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// ucvtf scalar
theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V2, REG_V3);
// ucvtf scalar from general
theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V4, REG_R0, INS_OPTS_4BYTE_TO_S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_4BYTE, REG_V5, REG_R1, INS_OPTS_8BYTE_TO_S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V6, REG_R2, INS_OPTS_4BYTE_TO_D);
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V7, REG_R3, INS_OPTS_8BYTE_TO_D);
// ucvtf vector
theEmitter->emitIns_R_R(INS_ucvtf, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_ucvtf, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R floating point operations, one dest, one source
//
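// Note: the 'p'-suffixed forms (fmaxp, fminp, faddp, ...) operate pairwise on adjacent elements, while
// the 'v'-suffixed forms (fmaxv, fminv, ...) reduce across the whole vector to a single element. The
// 'nm' forms follow the IEEE 754 minNum/maxNum rules, returning the numeric operand when the other
// operand is a quiet NaN.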
// fabs scalar
theEmitter->emitIns_R_R(INS_fabs, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V2, REG_V3);
// fabs vector
theEmitter->emitIns_R_R(INS_fabs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fabs, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fmaxp scalar
theEmitter->emitIns_R_R(INS_fmaxp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fmaxp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fmaxnmp scalar
theEmitter->emitIns_R_R(INS_fmaxnmp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fmaxnmp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fmaxnmv vector
theEmitter->emitIns_R_R(INS_fmaxnmv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fmaxv vector
theEmitter->emitIns_R_R(INS_fmaxv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fminp scalar
theEmitter->emitIns_R_R(INS_fminp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fminp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fminnmp scalar
theEmitter->emitIns_R_R(INS_fminnmp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fminnmp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fminnmv vector
theEmitter->emitIns_R_R(INS_fminnmv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fminv vector
theEmitter->emitIns_R_R(INS_fminv, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_4S);
// fneg scalar
theEmitter->emitIns_R_R(INS_fneg, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V2, REG_V3);
// fneg vector
theEmitter->emitIns_R_R(INS_fneg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fneg, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fsqrt scalar
theEmitter->emitIns_R_R(INS_fsqrt, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V2, REG_V3);
// fsqrt vector
theEmitter->emitIns_R_R(INS_fsqrt, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fsqrt, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_fsqrt, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// faddp scalar
theEmitter->emitIns_R_R(INS_faddp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_faddp, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_2D);
// fcmeq Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmeq, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmeq, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmge Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmge, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmge, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmgt Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmgt, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmgt, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmle Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmle, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmle, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// fcmlt Vd, Vn, #0.0
theEmitter->emitIns_R_R(INS_fcmlt, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_fcmlt, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
// frecpe scalar and vector
theEmitter->emitIns_R_R(INS_frecpe, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_frecpe, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
theEmitter->emitIns_R_R(INS_frecpe, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frecpe, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frecpe, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frecpx scalar
theEmitter->emitIns_R_R(INS_frecpx, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frecpx, EA_8BYTE, REG_V2, REG_V3);
// frsqrte
theEmitter->emitIns_R_R(INS_frsqrte, EA_4BYTE, REG_V0, REG_V1); // scalar 4BYTE
theEmitter->emitIns_R_R(INS_frsqrte, EA_8BYTE, REG_V2, REG_V3); // scalar 8BYTE
theEmitter->emitIns_R_R(INS_frsqrte, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frsqrte, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frsqrte, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// fcvtl{2} vector
theEmitter->emitIns_R_R(INS_fcvtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_fcvtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_fcvtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtl2, EA_16BYTE, REG_V5, REG_V6, INS_OPTS_4S);
// fcvtn{2} vector
theEmitter->emitIns_R_R(INS_fcvtn, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_fcvtn2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_fcvtn, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtn2, EA_16BYTE, REG_V5, REG_V6, INS_OPTS_4S);
// fcvtxn scalar
theEmitter->emitIns_R_R(INS_fcvtxn, EA_4BYTE, REG_V0, REG_V1);
// fcvtxn{2} vector
theEmitter->emitIns_R_R(INS_fcvtxn, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_fcvtxn2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
genDefineTempLabel(genCreateTempLabel());
// abs scalar
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V2, REG_V3);
// abs vector
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_abs, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_abs, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
// addv vector
theEmitter->emitIns_R_R(INS_addv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_addv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_addv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_addv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_addv, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// cnt vector
theEmitter->emitIns_R_R(INS_cnt, EA_8BYTE, REG_V22, REG_V23, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_cnt, EA_16BYTE, REG_V24, REG_V25, INS_OPTS_16B);
// cls vector
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_cls, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_cls, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// clz vector
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_clz, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_clz, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// mvn vector
theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V4, REG_V5);
theEmitter->emitIns_R_R(INS_mvn, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V8, REG_V9);
theEmitter->emitIns_R_R(INS_mvn, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_16B);
// neg scalar
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V2, REG_V3);
// neg vector
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_neg, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_neg, EA_16BYTE, REG_V16, REG_V17, INS_OPTS_2D);
// not vector (the same encoding as mvn)
theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V12, REG_V13);
theEmitter->emitIns_R_R(INS_not, EA_8BYTE, REG_V14, REG_V15, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V16, REG_V17);
theEmitter->emitIns_R_R(INS_not, EA_16BYTE, REG_V18, REG_V19, INS_OPTS_16B);
// rbit vector
theEmitter->emitIns_R_R(INS_rbit, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rbit, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
// rev16 vector
theEmitter->emitIns_R_R(INS_rev16, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rev16, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
// rev32 vector
theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rev32, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_rev32, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_rev32, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
// rev64 vector
theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_rev64, EA_8BYTE, REG_V12, REG_V13, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_rev64, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// sadalp vector
theEmitter->emitIns_R_R(INS_sadalp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sadalp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sadalp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sadalp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sadalp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sadalp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// saddlp vector
theEmitter->emitIns_R_R(INS_saddlp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_saddlp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_saddlp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_saddlp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_saddlp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_saddlp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// saddlv vector
theEmitter->emitIns_R_R(INS_saddlv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_saddlv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_saddlv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_saddlv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_saddlv, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// smaxv vector
theEmitter->emitIns_R_R(INS_smaxv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_smaxv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_smaxv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_smaxv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_smaxv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// sminv vector
theEmitter->emitIns_R_R(INS_sminv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sminv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sminv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sminv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sminv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// sqabs scalar
theEmitter->emitIns_R_R(INS_sqabs, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqabs, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqabs, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// sqabs vector
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqabs, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_sqabs, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// sqneg scalar
theEmitter->emitIns_R_R(INS_sqneg, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqneg, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqneg, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// sqneg vector
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqneg, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_sqneg, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// sqxtn scalar
theEmitter->emitIns_R_R(INS_sqxtn, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtn, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtn, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
// sqxtn vector
theEmitter->emitIns_R_R(INS_sqxtn, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqxtn, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqxtn, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// sqxtn2 vector
theEmitter->emitIns_R_R(INS_sqxtn2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqxtn2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqxtn2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
// sqxtun scalar
theEmitter->emitIns_R_R(INS_sqxtun, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtun, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_sqxtun, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
// sqxtun vector
theEmitter->emitIns_R_R(INS_sqxtun, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sqxtun, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sqxtun, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// sqxtun2 vector
theEmitter->emitIns_R_R(INS_sqxtun2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sqxtun2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sqxtun2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
// suqadd scalar
theEmitter->emitIns_R_R(INS_suqadd, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_suqadd, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_suqadd, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// suqadd vector
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_suqadd, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_suqadd, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// uadalp vector
theEmitter->emitIns_R_R(INS_uadalp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uadalp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uadalp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_uadalp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uadalp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uadalp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// uaddlp vector
theEmitter->emitIns_R_R(INS_uaddlp, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uaddlp, EA_8BYTE, REG_V2, REG_V3, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uaddlp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_uaddlp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uaddlp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uaddlp, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// uaddlv vector
theEmitter->emitIns_R_R(INS_uaddlv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uaddlv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uaddlv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uaddlv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uaddlv, EA_16BYTE, REG_V14, REG_V15, INS_OPTS_4S);
// umaxv vector
theEmitter->emitIns_R_R(INS_umaxv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_umaxv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_umaxv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_umaxv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_umaxv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// uminv vector
theEmitter->emitIns_R_R(INS_uminv, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uminv, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uminv, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uminv, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uminv, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_4S);
// uqxtn scalar
theEmitter->emitIns_R_R(INS_uqxtn, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_uqxtn, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_uqxtn, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
// uqxtn vector
theEmitter->emitIns_R_R(INS_uqxtn, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uqxtn, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uqxtn, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// uqxtn2 vector
theEmitter->emitIns_R_R(INS_uqxtn2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uqxtn2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uqxtn2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
// urecpe vector
theEmitter->emitIns_R_R(INS_urecpe, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_urecpe, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_4S);
// ursqrte vector
theEmitter->emitIns_R_R(INS_ursqrte, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_ursqrte, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_4S);
// usqadd scalar
theEmitter->emitIns_R_R(INS_usqadd, EA_1BYTE, REG_V0, REG_V1, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_usqadd, EA_2BYTE, REG_V2, REG_V3, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_usqadd, EA_4BYTE, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V6, REG_V7, INS_OPTS_NONE);
// usqadd vector
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_usqadd, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_usqadd, EA_16BYTE, REG_V12, REG_V13, INS_OPTS_2D);
// xtn vector
theEmitter->emitIns_R_R(INS_xtn, EA_8BYTE, REG_V0, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_xtn, EA_8BYTE, REG_V1, REG_V7, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_xtn, EA_8BYTE, REG_V2, REG_V8, INS_OPTS_2S);
// xtn2 vector
theEmitter->emitIns_R_R(INS_xtn2, EA_16BYTE, REG_V3, REG_V9, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_xtn2, EA_16BYTE, REG_V4, REG_V10, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_xtn2, EA_16BYTE, REG_V5, REG_V11, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R floating point round to int, one dest, one source
//
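// Note: the frint* suffix selects the rounding mode: 'a' = ties away, 'n' = ties to even,
// 'm' = toward minus infinity, 'p' = toward plus infinity, 'z' = toward zero, while 'i' and 'x'
// round using the current FPCR mode ('x' additionally raises the Inexact exception when the
// result differs from the input).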
// frinta scalar
theEmitter->emitIns_R_R(INS_frinta, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V2, REG_V3);
// frinta vector
theEmitter->emitIns_R_R(INS_frinta, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frinta, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frinta, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frinti scalar
theEmitter->emitIns_R_R(INS_frinti, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V2, REG_V3);
// frinti vector
theEmitter->emitIns_R_R(INS_frinti, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frinti, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frinti, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintm scalar
theEmitter->emitIns_R_R(INS_frintm, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V2, REG_V3);
// frintm vector
theEmitter->emitIns_R_R(INS_frintm, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintm, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintm, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintn scalar
theEmitter->emitIns_R_R(INS_frintn, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V2, REG_V3);
// frintn vector
theEmitter->emitIns_R_R(INS_frintn, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintn, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintn, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintp scalar
theEmitter->emitIns_R_R(INS_frintp, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V2, REG_V3);
// frintp vector
theEmitter->emitIns_R_R(INS_frintp, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintp, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintp, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintx scalar
theEmitter->emitIns_R_R(INS_frintx, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V2, REG_V3);
// frintx vector
theEmitter->emitIns_R_R(INS_frintx, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintx, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintx, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
// frintz scalar
theEmitter->emitIns_R_R(INS_frintz, EA_4BYTE, REG_V0, REG_V1);
theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V2, REG_V3);
// frintz vector
theEmitter->emitIns_R_R(INS_frintz, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_frintz, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_4S);
theEmitter->emitIns_R_R(INS_frintz, EA_16BYTE, REG_V8, REG_V9, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R floating point operations, one dest, two sources
//
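// Note: the scalar forms below omit the insOpts argument (defaulting to INS_OPTS_NONE), while the
// vector forms pass an explicit arrangement (2S, 4S or 2D) together with an EA_8BYTE/EA_16BYTE size.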
genDefineTempLabel(genCreateTempLabel());
// fadd
theEmitter->emitIns_R_R_R(INS_fadd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fsub
theEmitter->emitIns_R_R_R(INS_fsub, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fdiv
theEmitter->emitIns_R_R_R(INS_fdiv, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fdiv, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fdiv, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fdiv, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fmax
theEmitter->emitIns_R_R_R(INS_fmax, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmax, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fmaxp
theEmitter->emitIns_R_R_R(INS_fmaxp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmaxp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmaxp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fmaxnm
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmaxnm, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fmaxnmp vector
theEmitter->emitIns_R_R_R(INS_fmaxnmp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmaxnmp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmaxnmp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fmin
theEmitter->emitIns_R_R_R(INS_fmin, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmin, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fminp
theEmitter->emitIns_R_R_R(INS_fminp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fminp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fminp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fminnm
theEmitter->emitIns_R_R_R(INS_fminnm, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fminnm, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fminnm, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fminnm, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fminnm, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// fminnmp vector
theEmitter->emitIns_R_R_R(INS_fminnmp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fminnmp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fminnmp, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2D);
// fabd
theEmitter->emitIns_R_R_R(INS_fabd, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fabd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// frecps
theEmitter->emitIns_R_R_R(INS_frecps, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_frecps, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_frecps, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_frecps, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_frecps, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
// frsqrts
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_frsqrts, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R_R_R(INS_fmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmul, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmul, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
theEmitter->emitIns_R_R_R(INS_fmulx, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_fmulx, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmulx, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmulx, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmulx, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
theEmitter->emitIns_R_R_R(INS_fnmul, EA_4BYTE, REG_V0, REG_V1, REG_V2); // scalar 4BYTE
theEmitter->emitIns_R_R_R(INS_fnmul, EA_8BYTE, REG_V3, REG_V4, REG_V5); // scalar 8BYTE
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_I vector operations, one dest, one source reg, one immed
//
// Some of the test cases below might appear redundant since they emit the same combinations of
// instruction x size x vector arrangement. However, they are included to verify that the split
// constant encoding works with both small and large constants.
genDefineTempLabel(genCreateTempLabel());
// sshr scalar
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 64);
// sshr vector
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_sshr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// ssra scalar
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 64);
// ssra vector
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ssra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_ssra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// srshr scalar
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 64);
// srshr vector
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_srshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_srshr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// srsra scalar
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 64);
// srsra vector
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_srsra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_srsra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// shl scalar
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 0);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 63);
// shl vector
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V0, REG_V1, 0, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V4, REG_V5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_shl, EA_8BYTE, REG_V8, REG_V9, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V12, REG_V13, 32, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_shl, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// ushr scalar
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 64);
// ushr vector
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ushr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_ushr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// usra scalar
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 64);
// usra vector
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_usra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_usra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// urshr scalar
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 64);
// urshr vector
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_urshr, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_urshr, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// ursra scalar
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 64);
// ursra vector
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ursra, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_ursra, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// sri scalar
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 64);
// sri vector
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sri, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V12, REG_V13, 33, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_sri, EA_16BYTE, REG_V14, REG_V15, 64, INS_OPTS_2D);
// sli scalar
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 0);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V2, REG_V3, 14);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 27);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V6, REG_V7, 40);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 63);
// sli vector
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V0, REG_V1, 0, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V4, REG_V5, 8, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sli, EA_8BYTE, REG_V8, REG_V9, 16, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V12, REG_V13, 32, INS_OPTS_2D);
theEmitter->emitIns_R_R_I(INS_sli, EA_16BYTE, REG_V14, REG_V15, 63, INS_OPTS_2D);
// sshll{2} vector
theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sshll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sshll2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// ushll{2} vector
theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_ushll, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_ushll2, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
// shrn{2} vector
theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_shrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_shrn2, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
// rshrn{2} vector
theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V2, REG_V3, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V6, REG_V7, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_rshrn, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_rshrn2, EA_16BYTE, REG_V10, REG_V11, 32, INS_OPTS_4S);
// sxtl{2} vector
theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_sxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_sxtl2, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// uxtl{2} vector
theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V0, REG_V1, INS_OPTS_8B);
theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V6, REG_V7, INS_OPTS_8H);
theEmitter->emitIns_R_R(INS_uxtl, EA_8BYTE, REG_V8, REG_V9, INS_OPTS_2S);
theEmitter->emitIns_R_R(INS_uxtl2, EA_16BYTE, REG_V10, REG_V11, INS_OPTS_4S);
// sqrshrn scalar
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// sqrshrn{2} vector
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqrshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// sqrshrun scalar
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_1BYTE, REG_V0, REG_V1, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_2BYTE, REG_V2, REG_V3, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_2BYTE, REG_V2, REG_V3, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_4BYTE, REG_V4, REG_V5, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_4BYTE, REG_V4, REG_V5, 32, INS_OPTS_NONE);
// sqrshrun{2} vector
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrun, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqrshrun2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// sqshl scalar
theEmitter->emitIns_R_R_I(INS_sqshl, EA_1BYTE, REG_V0, REG_V1, 0, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_1BYTE, REG_V2, REG_V3, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_2BYTE, REG_V4, REG_V5, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_2BYTE, REG_V6, REG_V7, 15, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_4BYTE, REG_V8, REG_V9, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_4BYTE, REG_V10, REG_V11, 31, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V12, REG_V13, 32, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V14, REG_V15, 63, INS_OPTS_NONE);
// sqshl vector
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshl, EA_16BYTE, REG_V12, REG_V13, 63, INS_OPTS_2D);
// sqshlu scalar
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_1BYTE, REG_V0, REG_V1, 0, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_1BYTE, REG_V2, REG_V3, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_2BYTE, REG_V4, REG_V5, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_2BYTE, REG_V6, REG_V7, 15, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_4BYTE, REG_V8, REG_V9, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_4BYTE, REG_V10, REG_V11, 31, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V12, REG_V13, 32, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V14, REG_V15, 63, INS_OPTS_NONE);
// sqshlu vector
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshlu, EA_16BYTE, REG_V12, REG_V13, 63, INS_OPTS_2D);
// sqshrn scalar
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// sqshrn{2} vector
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// sqshrun scalar
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// sqshrun{2} vector
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrun, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_sqshrun2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// uqrshrn scalar
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// uqrshrn{2} vector
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqrshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_uqrshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
// uqshl scalar
theEmitter->emitIns_R_R_I(INS_uqshl, EA_1BYTE, REG_V0, REG_V1, 0, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_1BYTE, REG_V2, REG_V3, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_2BYTE, REG_V4, REG_V5, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_2BYTE, REG_V6, REG_V7, 15, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_4BYTE, REG_V8, REG_V9, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_4BYTE, REG_V10, REG_V11, 31, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V12, REG_V13, 32, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V14, REG_V15, 63, INS_OPTS_NONE);
// uqshl vector
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V2, REG_V3, 7, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V4, REG_V5, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V6, REG_V7, 15, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_8BYTE, REG_V8, REG_V9, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V10, REG_V11, 31, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_uqshl, EA_16BYTE, REG_V12, REG_V13, 63, INS_OPTS_2D);
// uqshrn scalar
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_1BYTE, REG_V0, REG_V1, 1, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_1BYTE, REG_V2, REG_V3, 8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_2BYTE, REG_V4, REG_V5, 9, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_2BYTE, REG_V6, REG_V7, 16, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_4BYTE, REG_V8, REG_V9, 17, INS_OPTS_NONE);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_4BYTE, REG_V10, REG_V11, 32, INS_OPTS_NONE);
// uqshrn{2} vector
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V0, REG_V1, 1, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V2, REG_V3, 8, INS_OPTS_8B);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V4, REG_V5, 1, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V6, REG_V7, 8, INS_OPTS_16B);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V8, REG_V9, 9, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V10, REG_V11, 16, INS_OPTS_4H);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V12, REG_V13, 9, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V14, REG_V15, 16, INS_OPTS_8H);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V16, REG_V17, 17, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqshrn, EA_8BYTE, REG_V18, REG_V18, 32, INS_OPTS_2S);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V20, REG_V21, 17, INS_OPTS_4S);
theEmitter->emitIns_R_R_I(INS_uqshrn2, EA_16BYTE, REG_V22, REG_V23, 32, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R vector operations, one dest, two sources
//
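// Note: INS_OPTS_* names the lane arrangement (8B/16B = 8-bit lanes, 4H/8H = 16-bit,
// 2S/4S = 32-bit, 1D/2D = 64-bit lanes); EA_8BYTE/EA_16BYTE selects the 64-bit or
// 128-bit form of the vector register.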
genDefineTempLabel(genCreateTempLabel());
// Specifying an Arrangement is optional
//
theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8);
theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11);
theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14);
theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17);
theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20);
theEmitter->emitIns_R_R_R(INS_and, EA_16BYTE, REG_V21, REG_V22, REG_V23);
theEmitter->emitIns_R_R_R(INS_bic, EA_16BYTE, REG_V24, REG_V25, REG_V26);
theEmitter->emitIns_R_R_R(INS_eor, EA_16BYTE, REG_V27, REG_V28, REG_V29);
theEmitter->emitIns_R_R_R(INS_orr, EA_16BYTE, REG_V30, REG_V31, REG_V0);
theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3);
theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6);
theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9);
theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12);
theEmitter->emitIns_R_R_R(INS_bsl, EA_16BYTE, REG_V13, REG_V14, REG_V15);
theEmitter->emitIns_R_R_R(INS_bit, EA_16BYTE, REG_V16, REG_V17, REG_V18);
theEmitter->emitIns_R_R_R(INS_bif, EA_16BYTE, REG_V19, REG_V20, REG_V21);
// Default Arrangement as per the ARM64 manual
//
theEmitter->emitIns_R_R_R(INS_and, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bic, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_eor, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_orr, EA_8BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_orn, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_and, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bic, EA_16BYTE, REG_V24, REG_V25, REG_V26, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_eor, EA_16BYTE, REG_V27, REG_V28, REG_V29, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_orr, EA_16BYTE, REG_V30, REG_V31, REG_V0, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_orn, EA_16BYTE, REG_V1, REG_V2, REG_V3, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bsl, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bit, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bif, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_bsl, EA_16BYTE, REG_V13, REG_V14, REG_V15, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bit, EA_16BYTE, REG_V16, REG_V17, REG_V18, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_bif, EA_16BYTE, REG_V19, REG_V20, REG_V21, INS_OPTS_16B);
genDefineTempLabel(genCreateTempLabel());
// add
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V0, REG_V1, REG_V2); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_add, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_add, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_2D);
// addp
theEmitter->emitIns_R_R(INS_addp, EA_16BYTE, REG_V0, REG_V1, INS_OPTS_2D); // scalar 16BYTE
theEmitter->emitIns_R_R_R(INS_addp, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_addp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_addp, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_addp, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_2D);
// sub
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V1, REG_V2, REG_V3); // scalar 8BYTE
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V4, REG_V5, REG_V6, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V7, REG_V8, REG_V9, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sub, EA_8BYTE, REG_V10, REG_V11, REG_V12, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V13, REG_V14, REG_V15, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V16, REG_V17, REG_V18, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V19, REG_V20, REG_V21, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sub, EA_16BYTE, REG_V22, REG_V23, REG_V24, INS_OPTS_2D);
genDefineTempLabel(genCreateTempLabel());
// saba vector
theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_saba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_saba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sabd vector
theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uaba vector
theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uaba, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uaba, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uabd vector
theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uabd, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uabd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
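// Note: sdot/udot are the ARMv8.2 dot-product instructions; each S lane accumulates a
// four-way dot product of the corresponding byte lanes.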
// sdot vector
theEmitter->emitIns_R_R_R(INS_sdot, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sdot, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
// smax vector
theEmitter->emitIns_R_R_R(INS_smax, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smax, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smax, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_smax, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smaxp vector
theEmitter->emitIns_R_R_R(INS_smaxp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_smaxp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smin vector
theEmitter->emitIns_R_R_R(INS_smin, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smin, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smin, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_smin, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sminp vector
theEmitter->emitIns_R_R_R(INS_sminp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sminp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sminp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sminp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sminp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sminp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// udot vector
theEmitter->emitIns_R_R_R(INS_udot, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_udot, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4S);
// umax vector
theEmitter->emitIns_R_R_R(INS_umax, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umax, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umax, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umax, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umax, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_umax, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umaxp vector
theEmitter->emitIns_R_R_R(INS_umaxp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_umaxp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umin vector
theEmitter->emitIns_R_R_R(INS_umin, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umin, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umin, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umin, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umin, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_umin, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uminp vector
theEmitter->emitIns_R_R_R(INS_uminp, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uminp, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uminp, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uminp, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uminp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uminp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
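// Note: the integer compares set each destination lane to all ones when the condition
// holds and to zero otherwise; cmhi/cmhs are the unsigned forms of cmgt/cmge, and
// cmtst tests whether the two sources share any set bit.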
// cmeq vector
theEmitter->emitIns_R_R_R(INS_cmeq, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmeq, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmge vector
theEmitter->emitIns_R_R_R(INS_cmge, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmge, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmge, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmge, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmgt vector
theEmitter->emitIns_R_R_R(INS_cmgt, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmgt, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmhi vector
theEmitter->emitIns_R_R_R(INS_cmhi, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmhi, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmhs vector
theEmitter->emitIns_R_R_R(INS_cmhs, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmhs, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// cmtst vector
theEmitter->emitIns_R_R_R(INS_cmtst, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_cmtst, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
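// Note: the floating-point pairwise add and compare forms below exercise only the
// 2S, 4S and 2D arrangements.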
// faddp vector
theEmitter->emitIns_R_R_R(INS_faddp, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_faddp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_faddp, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
// fcmeq vector
theEmitter->emitIns_R_R_R(INS_fcmeq, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fcmeq, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fcmeq, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
// fcmge vector
theEmitter->emitIns_R_R_R(INS_fcmge, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fcmge, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fcmge, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
// fcmgt vector
theEmitter->emitIns_R_R_R(INS_fcmgt, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fcmgt, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fcmgt, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
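// Note: permute instructions: trn1/trn2 transpose even/odd lane pairs, uzp1/uzp2
// de-interleave even/odd lanes, and zip1/zip2 interleave the lower/upper halves of
// the two sources.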
// trn1 vector
theEmitter->emitIns_R_R_R(INS_trn1, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_trn1, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_trn1, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_trn1, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// trn2 vector
theEmitter->emitIns_R_R_R(INS_trn2, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_trn2, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_trn2, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_trn2, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uzp1 vector
theEmitter->emitIns_R_R_R(INS_uzp1, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uzp1, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uzp2 vector
theEmitter->emitIns_R_R_R(INS_uzp2, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uzp2, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// zip1 vector
theEmitter->emitIns_R_R_R(INS_zip1, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_zip1, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_zip1, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_zip1, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// zip2 vector
theEmitter->emitIns_R_R_R(INS_zip2, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_zip2, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_zip2, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_zip2, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
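// Note: for the register-controlled shifts the shift amount is taken from the signed
// low byte of each lane of the second source; a negative amount shifts right, and the
// 'r' forms round the result.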
// srshl scalar
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// srshl vector
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_srshl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_srshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// sshl scalar
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// sshl vector
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sshl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// urshl scalar
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// urshl vector
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_urshl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_urshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// ushl scalar
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
// ushl vector
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ushl, EA_8BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_ushl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
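// Note: the *l forms widen both sources, the *w forms widen only the second source, and
// the *hn forms narrow by keeping the high half of each result; the '2' variants operate
// on the upper half of the 128-bit register.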
// addhn vector
theEmitter->emitIns_R_R_R(INS_addhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_addhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_addhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// addhn2 vector
theEmitter->emitIns_R_R_R(INS_addhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_addhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_addhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// raddhn vector
theEmitter->emitIns_R_R_R(INS_raddhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_raddhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_raddhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// raddhn2 vector
theEmitter->emitIns_R_R_R(INS_raddhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_raddhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_raddhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// rsubhn vector
theEmitter->emitIns_R_R_R(INS_rsubhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_rsubhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_rsubhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// rsubhn2 vector
theEmitter->emitIns_R_R_R(INS_rsubhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_rsubhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_rsubhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sabal vector
theEmitter->emitIns_R_R_R(INS_sabal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sabal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sabal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// sabal2 vector
theEmitter->emitIns_R_R_R(INS_sabal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sabal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sabal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sabdl vector
theEmitter->emitIns_R_R_R(INS_sabdl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sabdl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sabdl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// sabdl2 vector
theEmitter->emitIns_R_R_R(INS_sabdl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sabdl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sabdl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// saddl vector
theEmitter->emitIns_R_R_R(INS_saddl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_saddl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_saddl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// saddl2 vector
theEmitter->emitIns_R_R_R(INS_saddl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_saddl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_saddl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// saddw vector
theEmitter->emitIns_R_R_R(INS_saddw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_saddw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_saddw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// saddw2 vector
theEmitter->emitIns_R_R_R(INS_saddw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_saddw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_saddw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// shadd vector
theEmitter->emitIns_R_R_R(INS_shadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_shadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_shadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_shadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_shadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_shadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// shsub vector
theEmitter->emitIns_R_R_R(INS_shsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_shsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_shsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_shsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_shsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_shsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
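// Note: the saturating forms below with INS_OPTS_NONE are the true scalar encodings,
// where EA_1BYTE..EA_8BYTE selects the element size.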
// sqadd scalar
theEmitter->emitIns_R_R_R(INS_sqadd, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqadd vector
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// sqrshl scalar
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqrshl vector
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sqrshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// sqshl scalar
theEmitter->emitIns_R_R_R(INS_sqshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqshl vector
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_sqshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// sqsub scalar
theEmitter->emitIns_R_R_R(INS_sqsub, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// sqsub vector
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// srhadd vector
theEmitter->emitIns_R_R_R(INS_srhadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_srhadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// ssubl vector
theEmitter->emitIns_R_R_R(INS_ssubl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ssubl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ssubl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// ssubl2 vector
theEmitter->emitIns_R_R_R(INS_ssubl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ssubl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ssubl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// ssubw vector
theEmitter->emitIns_R_R_R(INS_ssubw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_ssubw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_ssubw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// ssubw2 vector
theEmitter->emitIns_R_R_R(INS_ssubw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_ssubw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_ssubw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// subhn vector
theEmitter->emitIns_R_R_R(INS_subhn, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_subhn, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_subhn, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
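// Note: sqdmlal/sqdmlsl/sqdmull widen to the double-width element, sqdmulh/sqrdmulh
// return the high half of the doubled product, and sqrdmlah/sqrdmlsh (ARMv8.1)
// accumulate the rounded high half.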
// sqdmlal scalar
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmlal vector
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
// sqdmlal2 vector
theEmitter->emitIns_R_R_R(INS_sqdmlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqdmlsl scalar
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmlsl vector
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
// sqdmlsl2 vector
theEmitter->emitIns_R_R_R(INS_sqdmlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqdmulh scalar
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmulh vector
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqdmull scalar
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqdmull vector
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqdmull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
// sqdmull2 vector
theEmitter->emitIns_R_R_R(INS_sqdmull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqdmull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqrdmlah scalar
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqrdmlah vector
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrdmlah, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqrdmlsh scalar
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqrdmlsh vector
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrdmlsh, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// sqrdmulh scalar
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
// sqrdmulh vector
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_sqrdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
// subhn2 vector
theEmitter->emitIns_R_R_R(INS_subhn2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_subhn2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_subhn2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uabal vector
theEmitter->emitIns_R_R_R(INS_uabal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uabal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uabal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uabal2 vector
theEmitter->emitIns_R_R_R(INS_uabal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uabal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uabal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uabdl vector
theEmitter->emitIns_R_R_R(INS_uabdl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uabdl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uabdl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uabdl2 vector
theEmitter->emitIns_R_R_R(INS_uabdl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uabdl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uabdl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uaddl vector
theEmitter->emitIns_R_R_R(INS_uaddl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uaddl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uaddl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uaddl2 vector
theEmitter->emitIns_R_R_R(INS_uaddl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uaddl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uaddl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uaddw vector
theEmitter->emitIns_R_R_R(INS_uaddw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uaddw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uaddw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// uaddw2 vector
theEmitter->emitIns_R_R_R(INS_uaddw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uaddw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uaddw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uhadd vector
theEmitter->emitIns_R_R_R(INS_uhadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uhadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uhsub vector
theEmitter->emitIns_R_R_R(INS_uhsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uhsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uqadd scalar
theEmitter->emitIns_R_R_R(INS_uqadd, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqadd vector
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// uqrshl scalar
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqrshl vector
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uqrshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uqshl scalar
theEmitter->emitIns_R_R_R(INS_uqshl, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqshl vector
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_uqshl, EA_16BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_2D);
// uqsub scalar
theEmitter->emitIns_R_R_R(INS_uqsub, EA_1BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_2BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_4BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_NONE);
// uqsub vector
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_uqsub, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// urhadd vector
theEmitter->emitIns_R_R_R(INS_urhadd, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_urhadd, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// usubl vector
theEmitter->emitIns_R_R_R(INS_usubl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_usubl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_usubl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// usubl2 vector
theEmitter->emitIns_R_R_R(INS_usubl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_usubl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_usubl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// usubw vector
theEmitter->emitIns_R_R_R(INS_usubw, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_usubw, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_usubw, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// usubw2 vector
theEmitter->emitIns_R_R_R(INS_usubw2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_usubw2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_usubw2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R vector multiply
//
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_mul, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_pmul, EA_8BYTE, REG_V18, REG_V19, REG_V20, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_pmul, EA_16BYTE, REG_V21, REG_V22, REG_V23, INS_OPTS_16B);
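// Note: for the by-element forms the legal index range depends on the element size:
// H elements use indices 0-7 and S elements use indices 0-3.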
// 'mul' vector by element
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mul, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
// 'mla' vector by element
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mla, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
// 'mls' vector by element
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V0, REG_V1, REG_V16, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V2, REG_V3, REG_V15, 1, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V4, REG_V5, REG_V17, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V6, REG_V7, REG_V0, 0, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V8, REG_V9, REG_V1, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_8BYTE, REG_V10, REG_V11, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V14, REG_V15, REG_V18, 1, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V16, REG_V17, REG_V13, 3, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V18, REG_V19, REG_V3, 0, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V20, REG_V21, REG_V4, 3, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_mls, EA_16BYTE, REG_V22, REG_V23, REG_V5, 7, INS_OPTS_8H);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
// pmull vector
theEmitter->emitIns_R_R_R(INS_pmull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_pmull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_1D);
// pmull2 vector
theEmitter->emitIns_R_R_R(INS_pmull2, EA_16BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_pmull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_2D);
// sdot vector
theEmitter->emitIns_R_R_R_I(INS_sdot, EA_8BYTE, REG_V0, REG_V1, REG_V16, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sdot, EA_16BYTE, REG_V3, REG_V4, REG_V31, 1, INS_OPTS_4S);
// smlal vector
theEmitter->emitIns_R_R_R(INS_smlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smlal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// smlal2 vector
theEmitter->emitIns_R_R_R(INS_smlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smlal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smlal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smlsl vector
theEmitter->emitIns_R_R_R(INS_smlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smlsl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// smlsl2 vector
theEmitter->emitIns_R_R_R(INS_smlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smlsl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smlsl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smull vector
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_smull, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// smull2 vector
theEmitter->emitIns_R_R_R(INS_smull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_smull2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_smull2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// udot vector
theEmitter->emitIns_R_R_R_I(INS_udot, EA_8BYTE, REG_V0, REG_V1, REG_V16, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_udot, EA_16BYTE, REG_V3, REG_V4, REG_V31, 1, INS_OPTS_4S);
// umlal vector
theEmitter->emitIns_R_R_R(INS_umlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umlal, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// umlal2 vector
theEmitter->emitIns_R_R_R(INS_umlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umlal2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umlal2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umlsl vector
theEmitter->emitIns_R_R_R(INS_umlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umlsl, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// umlsl2 vector
theEmitter->emitIns_R_R_R(INS_umlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umlsl2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umlsl2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// umull vector
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_V0, REG_V1, REG_V2, INS_OPTS_8B);
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_V3, REG_V4, REG_V5, INS_OPTS_4H);
theEmitter->emitIns_R_R_R(INS_umull, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
// umull2 vector
theEmitter->emitIns_R_R_R(INS_umull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_16B);
theEmitter->emitIns_R_R_R(INS_umull2, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_8H);
theEmitter->emitIns_R_R_R(INS_umull2, EA_16BYTE, REG_V15, REG_V16, REG_V17, INS_OPTS_4S);
// smlal vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_smlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// smlal2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_smlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// smlsl vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_smlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// smlsl2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_smlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_smlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// smull vector, by element
theEmitter->emitIns_R_R_R_I(INS_smull, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_smull, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// smull2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_smull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_smull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmlal scalar, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmlal vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
// sqdmlal2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmlsl scalar, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmlsl vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
// sqdmlsl2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmulh scalar
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmulh vector
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqdmull scalar, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqdmull vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqdmull, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
// sqdmull2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_sqdmull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqdmull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqrdmlah scalar
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqrdmlah vector
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlah, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqrdmlsh scalar
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqrdmlsh vector
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmlsh, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// sqrdmulh scalar
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_2BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_NONE);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_4BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_NONE);
// sqrdmulh vector
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_8BYTE, REG_V0, REG_V1, REG_V2, 7, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_8BYTE, REG_V3, REG_V4, REG_V5, 3, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_sqrdmulh, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// umlal vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlal, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_umlal, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// umlal2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlal2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_umlal2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// umlsl vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlsl, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
// umlsl2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_umlsl2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_umlsl2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
// umull vector, by element
theEmitter->emitIns_R_R_R_I(INS_umull, EA_8BYTE, REG_V0, REG_V1, REG_V2, 3, INS_OPTS_4H);
theEmitter->emitIns_R_R_R_I(INS_umull, EA_8BYTE, REG_V3, REG_V4, REG_V5, 1, INS_OPTS_2S);
// umull2 vector, by element
theEmitter->emitIns_R_R_R_I(INS_umull2, EA_16BYTE, REG_V6, REG_V7, REG_V8, 7, INS_OPTS_8H);
theEmitter->emitIns_R_R_R_I(INS_umull2, EA_16BYTE, REG_V9, REG_V10, REG_V11, 3, INS_OPTS_4S);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R floating point operations, one source/dest, and two source
//
genDefineTempLabel(genCreateTempLabel());
theEmitter->emitIns_R_R_R(INS_fmla, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmla, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmla, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmla, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
theEmitter->emitIns_R_R_R(INS_fmls, EA_8BYTE, REG_V6, REG_V7, REG_V8, INS_OPTS_2S);
theEmitter->emitIns_R_R_R(INS_fmls, EA_16BYTE, REG_V9, REG_V10, REG_V11, INS_OPTS_4S);
theEmitter->emitIns_R_R_R(INS_fmls, EA_16BYTE, REG_V12, REG_V13, REG_V14, INS_OPTS_2D);
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_4BYTE, REG_V15, REG_V16, REG_V17, 3); // scalar by element 4BYTE
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V18, REG_V19, REG_V20, 1); // scalar by element 8BYTE
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_8BYTE, REG_V21, REG_V22, REG_V23, 0, INS_OPTS_2S);
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V24, REG_V25, REG_V26, 2, INS_OPTS_4S);
theEmitter->emitIns_R_R_R_I(INS_fmls, EA_16BYTE, REG_V27, REG_V28, REG_V29, 0, INS_OPTS_2D);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
//
// R_R_R_R floating point operations, one dest, and three source
//
theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_4BYTE, REG_V0, REG_V8, REG_V16, REG_V24);
theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_4BYTE, REG_V1, REG_V9, REG_V17, REG_V25);
theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_4BYTE, REG_V2, REG_V10, REG_V18, REG_V26);
theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_4BYTE, REG_V3, REG_V11, REG_V19, REG_V27);
theEmitter->emitIns_R_R_R_R(INS_fmadd, EA_8BYTE, REG_V4, REG_V12, REG_V20, REG_V28);
theEmitter->emitIns_R_R_R_R(INS_fmsub, EA_8BYTE, REG_V5, REG_V13, REG_V21, REG_V29);
theEmitter->emitIns_R_R_R_R(INS_fnmadd, EA_8BYTE, REG_V6, REG_V14, REG_V22, REG_V30);
theEmitter->emitIns_R_R_R_R(INS_fnmsub, EA_8BYTE, REG_V7, REG_V15, REG_V23, REG_V31);
#endif
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
BasicBlock* label = genCreateTempLabel();
genDefineTempLabel(label);
instGen(INS_nop);
instGen(INS_nop);
instGen(INS_nop);
instGen(INS_nop);
theEmitter->emitIns_R_L(INS_adr, EA_4BYTE_DSP_RELOC, label, REG_R0);
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
#ifdef ALL_ARM64_EMITTER_UNIT_TESTS
printf("*************** End of genArm64EmitterUnitTests()\n");
#endif // ALL_ARM64_EMITTER_UNIT_TESTS
}
#endif // defined(DEBUG)
//------------------------------------------------------------------------
// genEstablishFramePointer: Set up the frame pointer by adding an offset to the stack pointer.
//
// Arguments:
// delta - the offset to add to the current stack pointer to establish the frame pointer
// reportUnwindData - true if establishing the frame pointer should be reported in the OS unwind data.
//
void CodeGen::genEstablishFramePointer(int delta, bool reportUnwindData)
{
assert(compiler->compGeneratingProlog);
if (delta == 0)
{
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, /* canSkip */ false);
}
else
{
GetEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, REG_FPBASE, REG_SPBASE, delta);
}
if (reportUnwindData)
{
compiler->unwindSetFrameReg(REG_FPBASE, delta);
}
}
//------------------------------------------------------------------------
// genAllocLclFrame: Probe the stack.
//
// Notes:
// This only does the probing; allocating the frame is done when callee-saved registers are saved.
// This is done before anything has been pushed. The previous frame might have a large outgoing argument
// space that has been allocated, but the lowest addresses have not been touched. Our frame setup might
// not touch up to the first 504 bytes. This means we could miss a guard page. On Windows, however,
// there are always three guard pages, so we will not miss them all. On Linux, there is only one guard
// page by default, so we need to be more careful. We do an extra probe if we might not have probed
// recently enough. That is, if a call and prolog establishment might lead to missing a page. We do this
// on Windows as well just to be consistent, even though it should not be necessary.
//
// Arguments:
// frameSize - the size of the stack frame being allocated.
// initReg - register to use as a scratch register.
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value. Otherwise, it is unchanged.
// maskArgRegsLiveIn - incoming argument registers that are currently live.
//
// Return value:
// None
//
void CodeGen::genAllocLclFrame(unsigned frameSize, regNumber initReg, bool* pInitRegZeroed, regMaskTP maskArgRegsLiveIn)
{
assert(compiler->compGeneratingProlog);
if (frameSize == 0)
{
return;
}
const target_size_t pageSize = compiler->eeGetPageSize();
// What offset from the final SP was the last probe? If we haven't probed almost a complete page, and
// if the next action on the stack might subtract from SP first, before touching the current SP, then
// we do one more probe at the very bottom. This can happen if we call a function on arm64 that does
// a "STP fp, lr, [sp-504]!", that is, pre-decrement SP then store. Note that we probe here for arm64,
// but we don't alter SP.
target_size_t lastTouchDelta = 0;
assert(!compiler->info.compPublishStubParam || (REG_SECRET_STUB_PARAM != initReg));
if (frameSize < pageSize)
{
lastTouchDelta = frameSize;
}
else if (frameSize < 3 * pageSize)
{
// The probing loop in "else"-case below would require at least 6 instructions (and more if
// 'frameSize' or 'pageSize' cannot be encoded as a mov-instruction immediate).
// Hence for frames that are smaller than 3 * PAGE_SIZE the JIT inlines the following probing code
// to decrease code size.
// TODO-ARM64: The probing mechanisms should be replaced by a call to stack probe helper
// as it is done on other platforms.
lastTouchDelta = frameSize;
for (target_size_t probeOffset = pageSize; probeOffset <= frameSize; probeOffset += pageSize)
{
// Generate:
// movw initReg, -probeOffset
// ldr wzr, [sp + initReg]
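// Loading into wzr discards the result, so this touches the page at (SP - probeOffset)
// as a pure probe without disturbing any register other than initReg.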
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -(ssize_t)probeOffset);
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, initReg);
regSet.verifyRegUsed(initReg);
*pInitRegZeroed = false; // The initReg does not contain zero
lastTouchDelta -= pageSize;
}
assert(lastTouchDelta == frameSize % pageSize);
compiler->unwindPadding();
}
else
{
// Emit the following sequence to 'tickle' the pages. Note it is important that the stack pointer not change
// until this is complete since the tickles could cause a stack overflow, and we need to be able to crawl
// the stack afterward (which means the stack pointer needs to be known).
regMaskTP availMask = RBM_ALLINT & (regSet.rsGetModifiedRegsMask() | ~RBM_INT_CALLEE_SAVED);
availMask &= ~maskArgRegsLiveIn; // Remove all of the incoming argument registers as they are currently live
availMask &= ~genRegMask(initReg); // Remove the pre-calculated initReg
regNumber rOffset = initReg;
regNumber rLimit;
regMaskTP tempMask;
// We pick the next lowest register number for rLimit
noway_assert(availMask != RBM_NONE);
tempMask = genFindLowestBit(availMask);
rLimit = genRegNumFromMask(tempMask);
// Generate:
//
// mov rOffset, -pageSize // On arm, this turns out to be "movw r1, 0xf000; sxth r1, r1".
// // We could save 4 bytes in the prolog by using "movs r1, 0" at the
// // runtime expense of running a useless first loop iteration.
// mov rLimit, -frameSize
// loop:
// ldr wzr, [sp + rOffset]
// sub rOffset, pageSize
// cmp rLimit, rOffset
// b.ls loop // If rLimit is lower or same, we need to probe this rOffset. Note
// // especially that if it is the same, we haven't probed this page.
noway_assert((ssize_t)(int)frameSize == (ssize_t)frameSize); // make sure framesize safely fits within an int
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rOffset, -(ssize_t)pageSize);
instGen_Set_Reg_To_Imm(EA_PTRSIZE, rLimit, -(ssize_t)frameSize);
// There's a "virtual" label here. But we can't create a label in the prolog, so we use the magic
// `emitIns_J` with a negative `instrCount` to branch back a specific number of instructions.
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, rOffset);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, rOffset, rOffset, pageSize);
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, rLimit, rOffset); // If equal, we need to probe again
GetEmitter()->emitIns_J(INS_bls, NULL, -4);
*pInitRegZeroed = false; // The initReg does not contain zero
compiler->unwindPadding();
lastTouchDelta = frameSize % pageSize;
}
if (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize)
{
assert(lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES < 2 * pageSize);
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, -(ssize_t)frameSize);
GetEmitter()->emitIns_R_R_R(INS_ldr, EA_4BYTE, REG_ZR, REG_SPBASE, initReg);
compiler->unwindPadding();
regSet.verifyRegUsed(initReg);
*pInitRegZeroed = false; // The initReg does not contain zero
}
}
//-----------------------------------------------------------------------------------
// instGen_MemoryBarrier: Emit a MemoryBarrier instruction
//
// Arguments:
// barrierKind - kind of barrier to emit (Full or Load-Only).
//
// Notes:
// All MemoryBarrier instructions can be removed with DOTNET_JitNoMemoryBarriers=1
//
void CodeGen::instGen_MemoryBarrier(BarrierKind barrierKind)
{
#ifdef DEBUG
if (JitConfig.JitNoMemoryBarriers() == 1)
{
return;
}
#endif // DEBUG
// Avoid emitting redundant memory barriers on arm64 if they belong to the same IG
// and there were no memory accesses in-between them
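// Note: on arm64, dmb ish is a full barrier over the inner-shareable domain, while dmb ishld
// only orders prior loads against later loads and stores. A full barrier therefore subsumes a
// load-only one, which is what makes the "upgrade" below safe.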
emitter::instrDesc* lastMemBarrier = GetEmitter()->emitLastMemBarrier;
if ((lastMemBarrier != nullptr) && compiler->opts.OptimizationEnabled())
{
BarrierKind prevBarrierKind = BARRIER_FULL;
if (lastMemBarrier->idSmallCns() == INS_BARRIER_ISHLD)
{
prevBarrierKind = BARRIER_LOAD_ONLY;
}
else
{
// Currently we only emit two kinds of barriers on arm64:
// ISH - Full (inner shareable domain)
// ISHLD - LoadOnly (inner shareable domain)
assert(lastMemBarrier->idSmallCns() == INS_BARRIER_ISH);
}
if ((prevBarrierKind == BARRIER_LOAD_ONLY) && (barrierKind == BARRIER_FULL))
{
// Previous memory barrier: load-only, current: full
// Upgrade the previous one to full
assert((prevBarrierKind == BARRIER_LOAD_ONLY) && (barrierKind == BARRIER_FULL));
lastMemBarrier->idSmallCns(INS_BARRIER_ISH);
}
}
else
{
GetEmitter()->emitIns_BARR(INS_dmb, barrierKind == BARRIER_LOAD_ONLY ? INS_BARRIER_ISHLD : INS_BARRIER_ISH);
}
}
//-----------------------------------------------------------------------------------
// genCodeForMadd: Emit a madd (Multiply-Add) instruction
//
// Arguments:
// tree - GT_MADD tree where op1 or op2 is GT_ADD
//
void CodeGen::genCodeForMadd(GenTreeOp* tree)
{
assert(tree->OperIs(GT_MADD) && varTypeIsIntegral(tree) && !(tree->gtFlags & GTF_SET_FLAGS));
genConsumeOperands(tree);
GenTree* a;
GenTree* b;
GenTree* c;
if (tree->gtGetOp1()->OperIs(GT_MUL) && tree->gtGetOp1()->isContained())
{
a = tree->gtGetOp1()->gtGetOp1();
b = tree->gtGetOp1()->gtGetOp2();
c = tree->gtGetOp2();
}
else
{
assert(tree->gtGetOp2()->OperIs(GT_MUL) && tree->gtGetOp2()->isContained());
a = tree->gtGetOp2()->gtGetOp1();
b = tree->gtGetOp2()->gtGetOp2();
c = tree->gtGetOp1();
}
bool useMsub = false;
if (a->OperIs(GT_NEG) && a->isContained())
{
a = a->gtGetOp1();
useMsub = true;
}
if (b->OperIs(GT_NEG) && b->isContained())
{
b = b->gtGetOp1();
useMsub = !useMsub; // it's either "a * -b" or "-a * -b" which is the same as "a * b"
}
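// madd d, a, b, c computes d = c + a * b, while msub d, a, b, c computes d = c - a * b,
// which covers the negated-operand cases folded above.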
GetEmitter()->emitIns_R_R_R_R(useMsub ? INS_msub : INS_madd, emitActualTypeSize(tree), tree->GetRegNum(),
a->GetRegNum(), b->GetRegNum(), c->GetRegNum());
genProduceReg(tree);
}
//-----------------------------------------------------------------------------------
// genCodeForMsub: Emit a msub (Multiply-Subtract) instruction
//
// Arguments:
// tree - GT_MSUB tree where op2 is GT_MUL
//
void CodeGen::genCodeForMsub(GenTreeOp* tree)
{
assert(tree->OperIs(GT_MSUB) && varTypeIsIntegral(tree) && !(tree->gtFlags & GTF_SET_FLAGS));
genConsumeOperands(tree);
assert(tree->gtGetOp2()->OperIs(GT_MUL));
assert(tree->gtGetOp2()->isContained());
GenTree* a = tree->gtGetOp1();
GenTree* b = tree->gtGetOp2()->gtGetOp1();
GenTree* c = tree->gtGetOp2()->gtGetOp2();
// d = a - b * c
// MSUB d, b, c, a
GetEmitter()->emitIns_R_R_R_R(INS_msub, emitActualTypeSize(tree), tree->GetRegNum(), b->GetRegNum(), c->GetRegNum(),
a->GetRegNum());
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForBfiz: Generates the code sequence for a GenTree node that
// represents a bitfield insert in zero with sign/zero extension.
//
// Arguments:
// tree - the bitfield insert in zero node.
//
void CodeGen::genCodeForBfiz(GenTreeOp* tree)
{
assert(tree->OperIs(GT_BFIZ));
emitAttr size = emitActualTypeSize(tree);
unsigned shiftBy = (unsigned)tree->gtGetOp2()->AsIntCon()->IconValue();
unsigned shiftByImm = shiftBy & (emitter::getBitWidth(size) - 1);
GenTreeCast* cast = tree->gtGetOp1()->AsCast();
GenTree* castOp = cast->CastOp();
genConsumeRegs(castOp);
unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE
: genTypeSize(castOp) * BITS_PER_BYTE;
const bool isUnsigned = cast->IsUnsigned() || varTypeIsUnsigned(cast->CastToType());
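// sbfiz/ubfiz dst, src, #lsb, #width takes the low 'width' bits of src, shifts them left by
// 'lsb' and sign/zero-extends the result; e.g. ((long)intVal) << 4 can become
// "sbfiz x0, x0, #4, #32" (registers here are purely illustrative).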
GetEmitter()->emitIns_R_R_I_I(isUnsigned ? INS_ubfiz : INS_sbfiz, size, tree->GetRegNum(), castOp->GetRegNum(),
(int)shiftByImm, (int)srcBits);
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForAddEx: Generates the code sequence for a GenTree node that
// represents an addition with sign or zero extended
//
// Arguments:
// tree - the add with extend node.
//
void CodeGen::genCodeForAddEx(GenTreeOp* tree)
{
assert(tree->OperIs(GT_ADDEX));
genConsumeOperands(tree);
GenTree* op;
GenTree* containedOp;
if (tree->gtGetOp1()->isContained())
{
containedOp = tree->gtGetOp1();
op = tree->gtGetOp2();
}
else
{
containedOp = tree->gtGetOp2();
op = tree->gtGetOp1();
}
assert(containedOp->isContained() && !op->isContained());
regNumber dstReg = tree->GetRegNum();
regNumber op1Reg = op->GetRegNum();
regNumber op2Reg = containedOp->gtGetOp1()->GetRegNum();
if (containedOp->OperIs(GT_CAST))
{
GenTreeCast* cast = containedOp->AsCast();
assert(varTypeIsLong(cast->CastToType()));
insOpts opts = cast->IsUnsigned() ? INS_OPTS_UXTW : INS_OPTS_SXTW;
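// Extended-register form, e.g. "add x0, x1, w2, sxtw": the narrow source is sign- or
// zero-extended to 64 bits as part of the add itself (registers here are purely illustrative).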
GetEmitter()->emitIns_R_R_R(tree->gtSetFlags() ? INS_adds : INS_add, emitActualTypeSize(tree), dstReg, op1Reg,
op2Reg, opts);
}
else
{
assert(containedOp->OperIs(GT_LSH));
ssize_t cns = containedOp->gtGetOp2()->AsIntCon()->IconValue();
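// Shifted-register form, e.g. "add x0, x1, x2, lsl #2": the contained left shift is folded
// into the add (registers here are purely illustrative).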
GetEmitter()->emitIns_R_R_R_I(tree->gtSetFlags() ? INS_adds : INS_add, emitActualTypeSize(tree), dstReg, op1Reg,
op2Reg, cns, INS_OPTS_LSL);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForCond: Generates the code sequence for a GenTree node that
// represents a conditional instruction.
//
// Arguments:
// tree - conditional op
//
void CodeGen::genCodeForCond(GenTreeOp* tree)
{
assert(tree->OperIs(GT_CSNEG_MI));
assert(!(tree->gtFlags & GTF_SET_FLAGS) && (tree->gtFlags & GTF_USE_FLAGS));
genConsumeOperands(tree);
instruction ins;
insCond cond;
switch (tree->OperGet())
{
case GT_CSNEG_MI:
{
ins = INS_csneg;
cond = INS_COND_MI;
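// csneg d, n, m, cond yields n when cond holds and -m otherwise; with the MI condition this
// selects op1 when the negative flag is set and -op2 when it is not.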
break;
}
default:
unreached();
}
regNumber dstReg = tree->GetRegNum();
regNumber op1Reg = tree->gtGetOp1()->GetRegNum();
regNumber op2Reg = tree->gtGetOp2()->GetRegNum();
GetEmitter()->emitIns_R_R_R_COND(ins, emitActualTypeSize(tree), dstReg, op1Reg, op2Reg, cond);
genProduceReg(tree);
}
#endif // TARGET_ARM64
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/codegenarmarch.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ARM/ARM64 Code Generator Common Code XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "emit.h"
#include "patchpointinfo.h"
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero.
// regTmp - an available temporary register that is used if 'spDelta' cannot be encoded by
// 'sub sp, sp, #spDelta' instruction.
// Can be REG_NA if the caller knows for certain that 'spDelta' fits into the immediate
// value range.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_ARM64
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, regTmp);
#else
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, INS_FLAGS_DONT_CARE, regTmp);
#endif
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
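// Probe (touch) the memory at the current SP first, then lower SP; callers rely on this
// probe-then-adjust order when computing how far back the last probe was.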
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
// We haven't probed almost a complete page. If lastTouchDelta==0, then spDelta was an exact
// multiple of pageSize, which means we last probed exactly one page back. Otherwise, we probed
// the page, but very far from the end. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we do one more probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genCodeForTreeNode Generate code for a single node in the tree.
//
// Preconditions:
// All operands have been evaluated.
//
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperGet() == GT_CNS_INT) || (treeNode->OperGet() == GT_CNS_DBL));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
#ifdef PROFILING_SUPPORTED
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
#if defined(TARGET_ARM64)
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
#endif // defined(TARGET_ARM64)
case GT_MOD:
case GT_UMOD:
case GT_DIV:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_AND_NOT:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
case GT_MUL:
genConsumeOperands(treeNode->AsOp());
genCodeForBinary(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// case GT_ROL: // No ROL instruction on ARM; it has been lowered to ROR.
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode->AsLclVarCommon());
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_MUL_LONG:
genCodeForMulLong(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_MADD:
genCodeForMadd(treeNode->AsOp());
break;
case GT_MSUB:
genCodeForMsub(treeNode->AsOp());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
genCodeForMulHi(treeNode->AsOp());
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_ADDEX:
genCodeForAddEx(treeNode->AsOp());
break;
case GT_BFIZ:
genCodeForBfiz(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_CMP:
#ifdef TARGET_ARM64
case GT_TEST_EQ:
case GT_TEST_NE:
#endif // TARGET_ARM64
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_JCMP:
genCodeForJumpCompare(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_PUTARG_SPLIT:
genPutArgSplit(treeNode->AsPutArgSplit());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
instGen_MemoryBarrier(barrierKind);
break;
}
#ifdef TARGET_ARM64
case GT_XCHG:
case GT_XORR:
case GT_XAND:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
#endif // TARGET_ARM64
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
if (treeNode->AsOp()->gtOp1->isContained())
{
// For this case we simply need to update the lifetime of the local.
genUpdateLife(treeNode->AsOp()->gtOp1);
}
else
{
genConsumeReg(treeNode->AsOp()->gtOp1);
}
break;
case GT_NO_OP:
instGen(INS_nop);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
#if defined(TARGET_ARM)
genMov32RelocatableDisplacement(genPendingCallLabel, targetReg);
#else
emit->emitIns_R_L(INS_adr, EA_PTRSIZE, genPendingCallLabel, targetReg);
#endif
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
#ifdef TARGET_ARM
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif // TARGET_ARM
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
#ifdef DEBUG
char message[256];
_snprintf_s(message, ArrLen(message), _TRUNCATE, "NYI: Unimplemented node type %s",
GenTree::OpName(treeNode->OperGet()));
NYIRAW(message);
#else
NYI("unimplemented node");
#endif
}
break;
}
}
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
// initReg = #GlobalSecurityCookieVal; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, initReg, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_SetGSCookie) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, initReg, initReg, 0);
regSet.verifyRegUsed(initReg);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
*pInitRegZeroed = false;
}
//------------------------------------------------------------------------
// genEmitGSCookieCheck: Generate code to check that the GS cookie
// wasn't thrashed by a buffer overrun.
//
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that the return register is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by REG_INTRET (R0).
if (!pushReg && (compiler->info.compRetNativeType == TYP_REF))
gcInfo.gcRegGCrefSetCur |= RBM_INTRET;
// We need two temporary registers, to load the GS cookie values and compare them. We can't use
// any argument registers if 'pushReg' is true (meaning we have a JMP call). They should be
// callee-trash registers, which should not contain anything interesting at this point.
// We don't have any IR node representing this check, so LSRA can't communicate registers
// for us to use.
regNumber regGSConst = REG_GSCOOKIE_TMP_0;
regNumber regGSValue = REG_GSCOOKIE_TMP_1;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
// load the GS cookie constant into a reg
//
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSConst, compiler->gsGlobalSecurityCookieVal);
}
else
{
// Ngen case - GS cookie constant needs to be accessed through an indirection.
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSConst, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_GSCookieCheck) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSConst, regGSConst, 0);
}
// Load this method's GS value from the stack frame
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, regGSValue, compiler->lvaGSSecurityCookie, 0);
// Compare with the GC cookie constant
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_eq, gsCheckBlk);
// regGSConst and regGSValue aren't needed anymore, we can use them for helper call
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN, regGSConst);
genDefineTempLabel(gsCheckBlk);
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
void CodeGen::genIntrinsic(GenTree* treeNode)
{
assert(treeNode->OperIs(GT_INTRINSIC));
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// Only a subset of functions are treated as math intrinsics.
//
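// On arm64 the rounding intrinsics map onto the FP round-to-integral instructions:
// frintp rounds toward +inf (Ceiling), frintm toward -inf (Floor), frintz toward zero
// (Truncate), and frintn to nearest-even (Round).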
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_ABS, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
#ifdef TARGET_ARM64
case NI_System_Math_Ceiling:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintp, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Floor:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintm, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Truncate:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintz, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Round:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintn, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Max:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmax, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
case NI_System_Math_Min:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmin, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
#endif // TARGET_ARM64
case NI_System_Math_Sqrt:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_SQRT, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//---------------------------------------------------------------------
// genPutArgStk - generate code for a GT_PUTARG_STK node
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// None
//
void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_STK));
GenTree* source = treeNode->gtOp1;
var_types targetType;
if (!compMacOsArm64Abi())
{
targetType = genActualType(source->TypeGet());
}
else
{
targetType = source->TypeGet();
}
emitter* emit = GetEmitter();
// This is the varNum for our store operations,
// typically this is the varNum for the Outgoing arg space
// When we are generating a tail call it will be the varNum for arg0
unsigned varNumOut = (unsigned)-1;
unsigned argOffsetMax = (unsigned)-1; // Records the maximum size of this area for assert checks
// Get argument offset to use with 'varNumOut'
// Here we cross check that argument offset hasn't changed from lowering to codegen since
// we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
unsigned argOffsetOut = treeNode->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(treeNode->gtCall, treeNode);
assert(curArgTabEntry != nullptr);
DEBUG_ARG_SLOTS_ASSERT(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
#endif // DEBUG
// Whether to set up the stack arg in the incoming or outgoing arg area:
// Fast tail calls implemented as epilog+jmp: the stack arg is set up in the incoming arg area.
// All other calls: the stack arg is set up in the outgoing arg area.
if (treeNode->putInIncomingArgArea())
{
varNumOut = getFirstArgWithStackSlot();
argOffsetMax = compiler->compArgSize;
#if FEATURE_FASTTAILCALL
// This must be a fast tail call.
assert(treeNode->gtCall->IsFastTailCall());
// Since it is a fast tail call, the existence of the first incoming arg is guaranteed
// because a fast tail call requires that the caller's incoming arg area is >= the outgoing
// arg area required for the tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(varNumOut);
assert(varDsc != nullptr);
#endif // FEATURE_FASTTAILCALL
}
else
{
varNumOut = compiler->lvaOutgoingArgSpaceVar;
argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
}
bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_FIELD_LIST);
if (!isStruct) // a normal non-Struct argument
{
if (varTypeIsSIMD(targetType))
{
assert(!source->isContained());
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
assert(compMacOsArm64Abi() || treeNode->GetStackByteSize() % TARGET_POINTER_SIZE == 0);
#ifdef TARGET_ARM64
if (compMacOsArm64Abi() && (treeNode->GetStackByteSize() == 12))
{
regNumber tmpReg = treeNode->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(varNumOut, argOffsetOut, srcReg, tmpReg);
argOffsetOut += 12;
}
else
#endif // TARGET_ARM64
{
emitAttr storeAttr = emitTypeSize(targetType);
emit->emitIns_S_R(INS_str, storeAttr, srcReg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
}
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
return;
}
if (compMacOsArm64Abi())
{
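// The Apple arm64 ABI packs small stack arguments to their natural size rather than widening
// them to a full slot, so narrow the store type to match the argument's byte size.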
switch (treeNode->GetStackByteSize())
{
case 1:
targetType = TYP_BYTE;
break;
case 2:
targetType = TYP_SHORT;
break;
default:
assert(treeNode->GetStackByteSize() >= 4);
break;
}
}
instruction storeIns = ins_Store(targetType);
emitAttr storeAttr = emitTypeSize(targetType);
// If it is contained then source must be the integer constant zero
if (source->isContained())
{
#ifdef TARGET_ARM64
assert(source->OperGet() == GT_CNS_INT);
assert(source->AsIntConCommon()->IconValue() == 0);
emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut);
#else // !TARGET_ARM64
// There is no zero register on ARM32
unreached();
#endif // !TARGET_ARM64
}
else
{
genConsumeReg(source);
emit->emitIns_S_R(storeIns, storeAttr, source->GetRegNum(), varNumOut, argOffsetOut);
#ifdef TARGET_ARM
if (targetType == TYP_LONG)
{
// This case currently only occurs for double types that are passed as TYP_LONG;
// actual long types would have been decomposed by now.
assert(source->IsCopyOrReload());
regNumber otherReg = (regNumber)source->AsCopyOrReload()->GetRegNumByIdx(1);
assert(otherReg != REG_NA);
argOffsetOut += EA_4BYTE;
emit->emitIns_S_R(storeIns, storeAttr, otherReg, varNumOut, argOffsetOut);
}
#endif // TARGET_ARM
}
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
}
else // We have some kind of a struct argument
{
assert(source->isContained()); // We expect that this node was marked as contained in Lower
if (source->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(treeNode, varNumOut);
}
else // We must have a GT_OBJ or a GT_LCL_VAR
{
noway_assert(source->OperIs(GT_LCL_VAR, GT_OBJ));
var_types targetType = source->TypeGet();
noway_assert(varTypeIsStruct(targetType));
// We will copy this struct to the stack, possibly using a ldp/ldr instruction
// in ARM64/ARM
// Setup loReg (and hiReg) from the internal registers that we reserved in lower.
//
regNumber loReg = treeNode->ExtractTempReg();
#ifdef TARGET_ARM64
regNumber hiReg = treeNode->GetSingleTempReg();
#endif // TARGET_ARM64
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
if (source->OperGet() == GT_LCL_VAR)
{
varNode = source->AsLclVarCommon();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
// We will treat this case the same as above
// (i.e if we just had this GT_LCL_VAR directly as the source)
// so update 'source' to point this GT_LCL_VAR_ADDR node
// and continue to the codegen for the LCL_VAR node below
//
assert(addrNode->isContained());
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
else // addrNode is used
{
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
                    // but we use `genConsumeAddress` as a precaution; consider using `genConsumeReg()` instead.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
#ifdef TARGET_ARM64
                    // If addrReg is equal to loReg, swap(loReg, hiReg)
// This reduces code complexity by only supporting one addrReg overwrite case
if (loReg == addrReg)
{
loReg = hiReg;
hiReg = addrReg;
}
#endif // TARGET_ARM64
}
}
            // Either varNode or addrNode must have been set up above;
            // the xor ensures that only one of the two is set up, not both
assert((varNode != nullptr) ^ (addrNode != nullptr));
ClassLayout* layout;
unsigned srcSize;
bool isHfa;
            // Set up srcSize, isHfa, and the layout (which provides the GC info)
if (source->OperGet() == GT_LCL_VAR)
{
assert(varNode != nullptr);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
srcSize = varDsc->lvSize();
isHfa = varDsc->lvIsHfa();
layout = varDsc->GetLayout();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
// If the source is an OBJ node then we need to use the type information
// it provides (size and GC layout) even if the node wraps a lclvar. Due
// to struct reinterpretation (e.g. Unsafe.As<X, Y>) it is possible that
// the OBJ node has a different type than the lclvar.
layout = source->AsObj()->GetLayout();
srcSize = layout->GetSize();
isHfa = compiler->IsHfa(layout->GetClassHandle());
}
// If we have an HFA we can't have any GC pointers,
            // if not then the max size for the struct is 16 bytes
if (isHfa)
{
noway_assert(!layout->HasGCPtr());
}
#ifdef TARGET_ARM64
else
{
noway_assert(srcSize <= 2 * TARGET_POINTER_SIZE);
}
noway_assert(srcSize <= MAX_PASS_MULTIREG_BYTES);
#endif // TARGET_ARM64
unsigned structSize;
unsigned dstSize = treeNode->GetStackByteSize();
if (dstSize != srcSize)
{
// We can generate a smaller code if store size is a multiple of TARGET_POINTER_SIZE.
// The dst size can be rounded up to PUTARG_STK size.
// The src size can be rounded up if it reads a local variable slot because the local
// variable stack allocation size is rounded up to be a multiple of the TARGET_POINTER_SIZE.
// The exception is arm64 apple arguments because they can be passed without padding.
if (varNode != nullptr)
{
// If we have a varNode, even if it was casted using `OBJ`, we can read its original memory size.
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
const unsigned varStackSize = varDsc->lvSize();
if (varStackSize >= srcSize)
{
srcSize = varStackSize;
}
}
}
if (dstSize == srcSize)
{
structSize = dstSize;
}
else
{
// With Unsafe object cast we can have different strange combinations:
// PutArgStk<8>(Obj<16>(LclVar<8>)) -> copy 8 bytes;
// PutArgStk<16>(Obj<16>(LclVar<8>)) -> copy 16 bytes, reading undefined memory after the local.
structSize = min(dstSize, srcSize);
}
int remainingSize = structSize;
unsigned structOffset = 0;
unsigned nextIndex = 0;
#ifdef TARGET_ARM64
// For a >= 16-byte structSize we will generate a ldp and stp instruction each loop
// ldp x2, x3, [x0]
// stp x2, x3, [sp, #16]
while (remainingSize >= 2 * TARGET_POINTER_SIZE)
{
var_types type0 = layout->GetGCPtrType(nextIndex + 0);
var_types type1 = layout->GetGCPtrType(nextIndex + 1);
if (varNode != nullptr)
{
                    // Load from our varNode (local variable) source
emit->emitIns_R_R_S_S(INS_ldp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg,
varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg);
noway_assert((remainingSize == 2 * TARGET_POINTER_SIZE) || (hiReg != addrReg));
// Load from our address expression source
emit->emitIns_R_R_R_I(INS_ldp, emitTypeSize(type0), loReg, hiReg, addrReg, structOffset,
INS_OPTS_NONE, emitTypeSize(type0));
}
// Emit stp instruction to store the two registers into the outgoing argument area
emit->emitIns_S_S_R_R(INS_stp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg, varNumOut,
argOffsetOut);
argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
structOffset += (2 * TARGET_POINTER_SIZE);
nextIndex += 2;
}
#else // TARGET_ARM
// For a >= 4 byte structSize we will generate a ldr and str instruction each loop
// ldr r2, [r0]
// str r2, [sp, #16]
while (remainingSize >= TARGET_POINTER_SIZE)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
                    // Load from our varNode (local variable) source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), loReg, varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg || remainingSize == TARGET_POINTER_SIZE);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), loReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), loReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored 4-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded 4-bytes of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
#endif // TARGET_ARM
            // For a 12-byte structSize we will generate two load and two store instructions
// ldr x2, [x0]
// ldr w3, [x0, #8]
// str x2, [sp, #16]
// str w3, [sp, #24]
while (remainingSize > 0)
{
var_types type;
if (remainingSize >= TARGET_POINTER_SIZE)
{
type = layout->GetGCPtrType(nextIndex);
}
else // (remainingSize < TARGET_POINTER_SIZE)
{
// the left over size is smaller than a pointer and thus can never be a GC type
assert(!layout->IsGCPtr(nextIndex));
if (remainingSize == 1)
{
type = TYP_UBYTE;
}
else if (remainingSize == 2)
{
type = TYP_USHORT;
}
else
{
assert(remainingSize == 4);
type = TYP_UINT;
}
}
const emitAttr attr = emitTypeSize(type);
const unsigned moveSize = genTypeSize(type);
assert(EA_SIZE_IN_BYTES(attr) == moveSize);
remainingSize -= moveSize;
instruction loadIns = ins_Load(type);
if (varNode != nullptr)
{
                    // Load from our varNode (local variable) source
emit->emitIns_R_S(loadIns, attr, loReg, varNode->GetLclNum(), structOffset);
}
else
{
assert(loReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(loadIns, attr, loReg, addrReg, structOffset);
}
// Emit a store instruction to store the register into the outgoing argument area
instruction storeIns = ins_Store(type);
emit->emitIns_S_R(storeIns, attr, loReg, varNumOut, argOffsetOut);
argOffsetOut += moveSize;
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
structOffset += moveSize;
nextIndex++;
}
}
}
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
assert(targetType != TYP_STRUCT);
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genPutArgSplit - generate code for a GT_PUTARG_SPLIT node
//
// Arguments
// tree - the GT_PUTARG_SPLIT node
//
// Return value:
// None
//
void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_SPLIT));
GenTree* source = treeNode->gtOp1;
emitter* emit = GetEmitter();
unsigned varNumOut = compiler->lvaOutgoingArgSpaceVar;
unsigned argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
if (source->OperGet() == GT_FIELD_LIST)
{
// Evaluate each of the GT_FIELD_LIST items into their register
// and store their register into the outgoing argument area
unsigned regIndex = 0;
unsigned firstOnStackOffs = UINT_MAX;
for (GenTreeFieldList::Use& use : source->AsFieldList()->Uses())
{
GenTree* nextArgNode = use.GetNode();
regNumber fieldReg = nextArgNode->GetRegNum();
genConsumeReg(nextArgNode);
if (regIndex >= treeNode->gtNumRegs)
{
if (firstOnStackOffs == UINT_MAX)
{
firstOnStackOffs = use.GetOffset();
}
var_types type = nextArgNode->TypeGet();
emitAttr attr = emitTypeSize(type);
unsigned offset = treeNode->getArgOffset() + use.GetOffset() - firstOnStackOffs;
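                // Illustrative example (not taken from the surrounding code): if the first field that spills
                // to the stack lives at struct offset 8, a later field at struct offset 12 is stored at
                // getArgOffset() + (12 - 8), i.e. 4 bytes into this argument's outgoing area.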
// We can't write beyond the outgoing arg area
assert(offset + EA_SIZE_IN_BYTES(attr) <= argOffsetMax);
// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
// argument area
emit->emitIns_S_R(ins_Store(type), attr, fieldReg, varNumOut, offset);
}
else
{
var_types type = treeNode->GetRegType(regIndex);
regNumber argReg = treeNode->GetRegNumByIdx(regIndex);
#ifdef TARGET_ARM
if (type == TYP_LONG)
{
// We should only see long fields for DOUBLEs passed in 2 integer registers, via bitcast.
// All other LONGs should have been decomposed.
// Handle the first INT, and then handle the 2nd below.
assert(nextArgNode->OperIs(GT_BITCAST));
type = TYP_INT;
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
// Now set up the next register for the 2nd INT
argReg = REG_NEXT(argReg);
regIndex++;
assert(argReg == treeNode->GetRegNumByIdx(regIndex));
fieldReg = nextArgNode->AsMultiRegOp()->GetRegNumByIdx(1);
}
#endif // TARGET_ARM
// If child node is not already in the register we need, move it
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
regIndex++;
}
}
}
else
{
var_types targetType = source->TypeGet();
assert(source->OperGet() == GT_OBJ);
assert(varTypeIsStruct(targetType));
regNumber baseReg = treeNode->ExtractTempReg();
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
// We will treat this case the same as above
            // (i.e. as if we had this GT_LCL_VAR directly as the source),
            // so update 'source' to point to this GT_LCL_VAR_ADDR node
// and continue to the codegen for the LCL_VAR node below
//
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
        // Either varNode or addrNode must have been set up above;
        // the xor ensures that only one of the two is set up, not both
assert((varNode != nullptr) ^ (addrNode != nullptr));
// This is the varNum for our load operations,
// only used when we have a struct with a LclVar source
unsigned srcVarNum = BAD_VAR_NUM;
if (varNode != nullptr)
{
assert(varNode->isContained());
srcVarNum = varNode->GetLclNum();
// handle promote situation
LclVarDsc* varDsc = compiler->lvaGetDesc(srcVarNum);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
// We don't split HFA struct
assert(!varDsc->lvIsHfa());
}
else // addrNode is used
{
assert(addrNode != nullptr);
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
            // but we use `genConsumeAddress` as a precaution; consider using `genConsumeReg()` instead.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
            // If addrReg is equal to baseReg, we use the last target register as an alternative baseReg.
            // Because the candidate mask for the internal baseReg does not include any of the target registers,
            // we can ensure that baseReg, addrReg, and the last target register are not all the same.
assert(baseReg != addrReg);
// We don't split HFA struct
assert(!compiler->IsHfa(source->AsObj()->GetLayout()->GetClassHandle()));
}
ClassLayout* layout = source->AsObj()->GetLayout();
// Put on stack first
unsigned nextIndex = treeNode->gtNumRegs;
unsigned structOffset = nextIndex * TARGET_POINTER_SIZE;
int remainingSize = treeNode->GetStackByteSize();
unsigned argOffsetOut = treeNode->getArgOffset();
        // remainingSize is always a multiple of TARGET_POINTER_SIZE
assert(remainingSize % TARGET_POINTER_SIZE == 0);
while (remainingSize > 0)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
                // Load from our srcVarNum (local variable) source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), baseReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(baseReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), baseReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), baseReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored 4-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded 4-bytes of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
        // We set up the registers in order, so that by the time we assign the last target register,
        // `baseReg` is no longer in use, in case we had to reuse the last target register for it.
structOffset = 0;
for (unsigned idx = 0; idx < treeNode->gtNumRegs; idx++)
{
regNumber targetReg = treeNode->GetRegNumByIdx(idx);
var_types type = treeNode->GetRegType(idx);
if (varNode != nullptr)
{
                // Load from our srcVarNum (local variable) source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), targetReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
if (targetReg == addrReg && idx != treeNode->gtNumRegs - 1)
{
assert(targetReg != baseReg);
var_types addrType = addrNode->TypeGet();
emit->emitIns_Mov(INS_mov, emitActualTypeSize(addrType), baseReg, addrReg, /* canSkip */ false);
addrReg = baseReg;
}
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), targetReg, addrReg, structOffset);
}
structOffset += TARGET_POINTER_SIZE;
}
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GentreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount = actualOp1->GetMultiRegCount(compiler);
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
    // Treat dst register as a homogeneous vector with element size equal to the src size
// Insert pieces in reverse order
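    // Illustrative example (assumes a four-float HFA returned in s0-s3 with dst == v0); iterating in
    // reverse emits
    //     mov v0.s[3], v3.s[0]
    //     mov v0.s[2], v2.s[0]
    //     mov v0.s[1], v1.s[0]
    //     mov v0.s[0], v0.s[0]
    // so the element that overlaps the destination register is written last and no temporary is needed.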
for (int i = regCount - 1; i >= 0; --i)
{
var_types type = op1->gtSkipReloadOrCopy()->GetRegTypeByIndex(i);
regNumber reg = actualOp1->GetRegByIndex(i);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
if (reloadReg != REG_NA)
{
reg = reloadReg;
}
}
assert(reg != REG_NA);
if (varTypeIsFloating(type))
{
// If the register piece was passed in a floating point register
// Use a vector mov element instruction
// src is not a vector, so it is in the first element reg[0]
// mov dst[i], reg[0]
// This effectively moves from `reg[0]` to `dst[i]`, leaving other dst bits unchanged till further
// iterations
// For the case where reg == dst, if we iterate so that we write dst[0] last, we eliminate the need for
// a temporary
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), dst, reg, i, 0);
}
else
{
// If the register piece was passed in an integer register
// Use a vector mov from general purpose register instruction
// mov dst[i], reg
// This effectively moves from `reg` to `dst[i]`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), dst, reg, i);
}
}
genProduceReg(lclNode);
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genRangeCheck: generate code for GT_BOUNDS_CHECK node.
//
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrRef = nullptr;
int lenOffset = 0;
GenTree* src1;
GenTree* src2;
emitJumpKind jmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
if (arrIndex->isContainedIntOrIImmed())
{
// To encode using a cmp immediate, we place the
// constant operand in the second position
src1 = arrLen;
src2 = arrIndex;
jmpKind = EJ_ls;
}
else
{
src1 = arrIndex;
src2 = arrLen;
jmpKind = EJ_hs;
}
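    // Illustrative example (not emitted verbatim): for "arr[5]" the index is a contained immediate, so we
    // emit "cmp lenReg, #5" and branch to the throw block on EJ_ls, i.e. when the unsigned length is
    // lower than or the same as 5.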
var_types bndsChkType = genActualType(src2->TypeGet());
#ifdef DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
    // The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitActualTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(INS_cmp, emitActualTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
#ifdef TARGET_ARM
assert(!"GT_NULLCHECK isn't supported for Arm32; use GT_IND.");
#else
assert(tree->OperIs(GT_NULLCHECK));
GenTree* op1 = tree->gtOp1;
genConsumeRegs(op1);
regNumber targetReg = REG_ZR;
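    // Illustrative note: this typically becomes a load such as "ldr wzr, [addrReg]" whose result is
    // discarded but which still faults if the address is null.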
GetEmitter()->emitInsLoadStoreOp(ins_Load(tree->TypeGet()), emitActualTypeSize(tree), targetReg, tree);
#endif
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
emitter* emit = GetEmitter();
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
noway_assert(tgtReg != REG_NA);
// We will use a temp register to load the lower bound and dimension size values.
regNumber tmpReg = arrIndex->GetSingleTempReg();
assert(tgtReg != tmpReg);
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
unsigned offset;
offset = compiler->eeGetMDArrayLowerBoundOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
regNumber tgtReg = arrOffset->GetRegNum();
noway_assert(tgtReg != REG_NA);
if (!offsetNode->IsIntegralConst(0))
{
emitter* emit = GetEmitter();
regNumber offsetReg = genConsumeReg(offsetNode);
regNumber indexReg = genConsumeReg(indexNode);
regNumber arrReg = genConsumeReg(arrOffset->gtArrObj);
noway_assert(offsetReg != REG_NA);
noway_assert(indexReg != REG_NA);
noway_assert(arrReg != REG_NA);
regNumber tmpReg = arrOffset->GetSingleTempReg();
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
unsigned offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
// Load tmpReg with the dimension size and evaluate
// tgtReg = offsetReg*tmpReg + indexReg.
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R_R(INS_MULADD, EA_PTRSIZE, tgtReg, tmpReg, offsetReg, indexReg);
}
else
{
regNumber indexReg = genConsumeReg(indexNode);
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
//
void CodeGen::genCodeForShift(GenTree* tree)
{
var_types targetType = tree->TypeGet();
genTreeOps oper = tree->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr size = emitActualTypeSize(tree);
regNumber dstReg = tree->GetRegNum();
assert(dstReg != REG_NA);
genConsumeOperands(tree->AsOp());
GenTree* operand = tree->gtGetOp1();
GenTree* shiftBy = tree->gtGetOp2();
if (!shiftBy->IsCnsIntOrI())
{
GetEmitter()->emitIns_R_R_R(ins, size, dstReg, operand->GetRegNum(), shiftBy->GetRegNum());
}
else
{
unsigned immWidth = emitter::getBitWidth(size); // For ARM64, immWidth will be set to 32 or 64
unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal & (immWidth - 1);
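        // Illustrative example: for a 64-bit shift immWidth is 64, so a constant shift amount of 70
        // is masked to 70 & 63 == 6.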
GetEmitter()->emitIns_R_R_I(ins, size, dstReg, operand->GetRegNum(), shiftByImm);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// lclAddrNode - the node.
//
void CodeGen::genCodeForLclAddr(GenTreeLclVarCommon* lclAddrNode)
{
assert(lclAddrNode->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = lclAddrNode->TypeGet();
emitAttr size = emitTypeSize(targetType);
regNumber targetReg = lclAddrNode->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
GetEmitter()->emitIns_R_S(INS_lea, size, targetReg, lclAddrNode->GetLclNum(), lclAddrNode->GetLclOffs());
genProduceReg(lclAddrNode);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
NYI_IF(targetType == TYP_STRUCT, "GT_LCL_FLD: struct load local field not supported");
assert(targetReg != REG_NA);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
#ifdef TARGET_ARM
if (tree->IsOffsetMisaligned())
{
// Arm supports unaligned access only for integer types,
// load the floating data as 1 or 2 integer registers and convert them to float.
regNumber addr = tree->ExtractTempReg();
emit->emitIns_R_S(INS_lea, EA_PTRSIZE, addr, varNum, offs);
if (targetType == TYP_FLOAT)
{
regNumber floatAsInt = tree->GetSingleTempReg();
emit->emitIns_R_R(INS_ldr, EA_4BYTE, floatAsInt, addr);
emit->emitIns_Mov(INS_vmov_i2f, EA_4BYTE, targetReg, floatAsInt, /* canSkip */ false);
}
else
{
regNumber halfdoubleAsInt1 = tree->ExtractTempReg();
regNumber halfdoubleAsInt2 = tree->GetSingleTempReg();
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt1, addr, 0);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt2, addr, 4);
emit->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, halfdoubleAsInt1, halfdoubleAsInt2);
}
}
else
#endif // TARGET_ARM
{
emitAttr attr = emitActualTypeSize(targetType);
instruction ins = ins_Load(targetType);
emit->emitIns_R_S(ins, attr, targetReg, varNum, offs);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
genConsumeReg(base);
genConsumeReg(index);
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(base->GetRegNum(), base->TypeGet());
assert(!varTypeIsGC(index->TypeGet()));
// The index is never contained, even if it is a constant.
assert(index->isUsedFromReg());
const regNumber tmpReg = node->GetSingleTempReg();
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, base->GetRegNum(), node->gtLenOffset);
GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(index->TypeGet()), index->GetRegNum(), tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
// Can we use a ScaledAdd instruction?
//
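    // Illustrative example: for gtElemSize == 8, BitScanForward yields scale == 3 and genScaledAdd
    // emits an "add dst, base, index, LSL #3" style instruction on Arm64.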
if (isPow2(node->gtElemSize) && (node->gtElemSize <= 32768))
{
DWORD scale;
BitScanForward(&scale, node->gtElemSize);
// dest = base + index * scale
genScaledAdd(emitActualTypeSize(node), node->GetRegNum(), base->GetRegNum(), index->GetRegNum(), scale);
}
else // we have to load the element size and use a MADD (multiply-add) instruction
{
// tmpReg = element size
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg, (ssize_t)node->gtElemSize);
// dest = index * tmpReg + base
GetEmitter()->emitIns_R_R_R_R(INS_MULADD, emitActualTypeSize(node), node->GetRegNum(), index->GetRegNum(),
tmpReg, base->GetRegNum());
}
// dest = dest + elemOffs
GetEmitter()->emitIns_R_R_I(INS_add, emitActualTypeSize(node), node->GetRegNum(), node->GetRegNum(),
node->gtElemOffset);
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types type = tree->TypeGet();
instruction ins = ins_Load(type);
regNumber targetReg = tree->GetRegNum();
genConsumeAddress(tree->Addr());
bool emitBarrier = false;
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
#ifdef TARGET_ARM64
bool addrIsInReg = tree->Addr()->isUsedFromReg();
bool addrIsAligned = ((tree->gtFlags & GTF_IND_UNALIGNED) == 0);
// on arm64-v8.3+ we can use ldap* instructions with acquire/release semantics to avoid
// full memory barriers if mixed with STLR
bool hasRcpc = compiler->compOpportunisticallyDependsOn(InstructionSet_Rcpc);
if ((ins == INS_ldrb) && addrIsInReg)
{
ins = hasRcpc ? INS_ldaprb : INS_ldarb;
}
else if ((ins == INS_ldrh) && addrIsInReg && addrIsAligned)
{
ins = hasRcpc ? INS_ldaprh : INS_ldarh;
}
else if ((ins == INS_ldr) && addrIsInReg && addrIsAligned && genIsValidIntReg(targetReg))
{
ins = hasRcpc ? INS_ldapr : INS_ldar;
}
else
#endif // TARGET_ARM64
{
emitBarrier = true;
}
}
GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), targetReg, tree);
if (emitBarrier)
{
// when INS_ldar* could not be used for a volatile load,
// we use an ordinary load followed by a load barrier.
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
genProduceReg(tree);
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by the means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
#ifdef TARGET_ARM64
// The following classes
// - InitBlockUnrollHelper
// - CopyBlockUnrollHelper
// encapsulate algorithms that produce instruction sequences for inlined equivalents of memset() and memcpy() functions.
//
// Each class has a private template function that accepts an "InstructionStream" as a template class argument:
// - InitBlockUnrollHelper::UnrollInitBlock<InstructionStream>(startDstOffset, byteCount, initValue)
// - CopyBlockUnrollHelper::UnrollCopyBlock<InstructionStream>(startSrcOffset, startDstOffset, byteCount)
//
// The design goal is to separate optimization approaches implemented by the algorithms
// from the target platform specific details.
//
// InstructionStream is a "stream" of load/store instructions (i.e. ldr/ldp/str/stp) that represents an instruction
// sequence that will initialize a memory region with some value or copy values from one memory region to another.
//
// As far as UnrollInitBlock and UnrollCopyBlock are concerned, InstructionStream implements the following class
// member functions:
// - LoadPairRegs(offset, regSizeBytes)
// - StorePairRegs(offset, regSizeBytes)
// - LoadReg(offset, regSizeBytes)
// - StoreReg(offset, regSizeBytes)
//
// There are three implementations of InstructionStream:
// - CountingStream that counts how many instructions were pushed out of the stream
// - VerifyingStream that validates that all the instructions in the stream are encodable on Arm64
// - ProducingStream that maps the function to corresponding emitter functions
//
// The idea behind the design is that the decision regarding which instruction sequence to emit
// (scalar instructions vs. SIMD instructions) is made by executing the algorithm that produces the sequence
// while counting the number of produced instructions and verifying that all the instructions are encodable.
//
// For example, using SIMD instructions might produce a shorter sequence but require "spilling" the value of a
// starting address to an integer register (due to stricter offset alignment rules for 16-byte wide SIMD
// instructions). This way CodeGen can take that cost into account before emitting the instruction sequence.
//
// An alternative design might have fused VerifyingStream and ProducingStream into one class
// that would allow undoing an instruction if the sequence turns out not to be fully encodable.
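//
// Illustrative example (not part of the implementation): to initialize 64 bytes at a 16-byte aligned,
// encodable offset, the 16-byte wide variant costs one "movi" plus two "stp q, q" instructions (3 in total),
// while the 8-byte wide variant needs four "stp x, x" instructions; counting both variants with
// CountingStream (and checking them with VerifyingStream) is how the cheaper encodable sequence is chosen.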
class CountingStream
{
public:
CountingStream()
{
instrCount = 0;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
unsigned InstructionCount() const
{
return instrCount;
}
private:
unsigned instrCount;
};
class VerifyingStream
{
public:
VerifyingStream()
{
canEncodeAllLoads = true;
canEncodeAllStores = true;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads = canEncodeAllLoads && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void LoadReg(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads =
canEncodeAllLoads && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
void StoreReg(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
bool CanEncodeAllLoads() const
{
return canEncodeAllLoads;
}
bool CanEncodeAllStores() const
{
return canEncodeAllStores;
}
private:
bool canEncodeAllLoads;
bool canEncodeAllStores;
};
class ProducingStreamBaseInstrs
{
public:
ProducingStreamBaseInstrs(regNumber intReg1, regNumber intReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), intReg2(intReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber intReg2;
const regNumber addrReg;
emitter* const emitter;
};
class ProducingStream
{
public:
ProducingStream(regNumber intReg1, regNumber simdReg1, regNumber simdReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), simdReg1(simdReg1), simdReg2(simdReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
// Note that 'intReg1' can be unavailable.
// If that is the case, then use SIMD instruction ldr and
// 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
// Note that 'intReg1' can be unavailable.
        // If that is the case, then use SIMD instruction str and
// 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber simdReg1;
const regNumber simdReg2;
const regNumber addrReg;
emitter* const emitter;
};
class BlockUnrollHelper
{
public:
    // The following function returns a size in bytes that
    // 1) is greater than or equal to 'byteCount' and
    // 2) can be read or written by a single instruction on Arm64.
    // For example, the Arm64 ISA has ldrb/strb and ldrh/strh that
    // load/store 1 or 2 bytes, respectively.
    // However, there is no instruction that can load/store 3 bytes and
    // the next "smallest" instruction is ldr/str, which operates at 4-byte granularity.
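    // For example: byteCount == 3 returns 4, byteCount == 5 returns 8, and byteCount == 9 returns 16.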
static unsigned GetRegSizeAtLeastBytes(unsigned byteCount)
{
assert(byteCount != 0);
assert(byteCount < 16);
unsigned regSizeBytes = byteCount;
if (byteCount > 8)
{
regSizeBytes = 16;
}
else if (byteCount > 4)
{
regSizeBytes = 8;
}
else if (byteCount > 2)
{
regSizeBytes = 4;
}
return regSizeBytes;
}
};
class InitBlockUnrollHelper
{
public:
InitBlockUnrollHelper(int dstOffset, unsigned byteCount) : dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
VerifyingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.CanEncodeAllStores();
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
void Unroll(regNumber intReg, regNumber simdReg, regNumber addrReg, emitter* emitter) const
{
ProducingStream instrStream(intReg, simdReg, simdReg, addrReg, emitter);
UnrollInitBlock(instrStream, FP_REGSIZE_BYTES);
}
void UnrollBaseInstrs(regNumber intReg, regNumber addrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs instrStream(intReg, intReg, addrReg, emitter);
UnrollInitBlock(instrStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollInitBlock(InstructionStream& instrStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
int offset = dstStartOffset;
const int endOffset = offset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int offsetAligned = AlignUp((UINT)offset, storePairRegsAlignment);
const int storePairRegsInstrCount = (endOffset - offsetAligned) / storePairRegsWritesBytes;
if (storePairRegsInstrCount > 0)
{
if (offset != offsetAligned)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(offsetAligned - offset);
instrStream.StoreReg(offset, firstRegSizeBytes);
offset = offsetAligned;
}
while (endOffset - offset >= storePairRegsWritesBytes)
{
instrStream.StorePairRegs(offset, initialRegSizeBytes);
offset += storePairRegsWritesBytes;
}
if (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
}
if (offset != endOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endOffset - offset < initialRegSizeBytes);
while (offset != endOffset)
{
if (isSafeToWriteBehind)
{
assert(endOffset - offset < initialRegSizeBytes);
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (offset + initialRegSizeBytes > endOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int dstStartOffset;
const unsigned byteCount;
};
class CopyBlockUnrollHelper
{
public:
CopyBlockUnrollHelper(int srcOffset, int dstOffset, unsigned byteCount)
: srcStartOffset(srcOffset), dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetSrcOffset() const
{
return srcStartOffset;
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetSrcOffset(int srcOffset)
{
srcStartOffset = srcOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
TryEncodeAllOffsets(regSizeBytes, &canEncodeAllLoads, &canEncodeAllStores);
return canEncodeAllLoads && canEncodeAllStores;
}
void TryEncodeAllOffsets(int regSizeBytes, bool* pCanEncodeAllLoads, bool* pCanEncodeAllStores) const
{
assert(pCanEncodeAllLoads != nullptr);
assert(pCanEncodeAllStores != nullptr);
VerifyingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
*pCanEncodeAllLoads = instrStream.CanEncodeAllLoads();
*pCanEncodeAllStores = instrStream.CanEncodeAllStores();
}
void Unroll(unsigned initialRegSizeBytes,
regNumber intReg,
regNumber simdReg1,
regNumber simdReg2,
regNumber srcAddrReg,
regNumber dstAddrReg,
emitter* emitter) const
{
ProducingStream loadStream(intReg, simdReg1, simdReg2, srcAddrReg, emitter);
ProducingStream storeStream(intReg, simdReg1, simdReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, initialRegSizeBytes);
}
void UnrollBaseInstrs(
regNumber intReg1, regNumber intReg2, regNumber srcAddrReg, regNumber dstAddrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs loadStream(intReg1, intReg2, srcAddrReg, emitter);
ProducingStreamBaseInstrs storeStream(intReg1, intReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollCopyBlock(InstructionStream& loadStream, InstructionStream& storeStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
int srcOffset = srcStartOffset;
int dstOffset = dstStartOffset;
const int endSrcOffset = srcOffset + byteCount;
const int endDstOffset = dstOffset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int dstOffsetAligned = AlignUp((UINT)dstOffset, storePairRegsAlignment);
if (byteCount >= (unsigned)storePairRegsWritesBytes)
{
const int dstBytesToAlign = dstOffsetAligned - dstOffset;
if (dstBytesToAlign != 0)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(dstBytesToAlign);
loadStream.LoadReg(srcOffset, firstRegSizeBytes);
storeStream.StoreReg(dstOffset, firstRegSizeBytes);
srcOffset = srcOffset + dstBytesToAlign;
dstOffset = dstOffsetAligned;
}
while (endDstOffset - dstOffset >= storePairRegsWritesBytes)
{
loadStream.LoadPairRegs(srcOffset, initialRegSizeBytes);
storeStream.StorePairRegs(dstOffset, initialRegSizeBytes);
srcOffset += storePairRegsWritesBytes;
dstOffset += storePairRegsWritesBytes;
}
if (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
}
if (dstOffset != endDstOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endSrcOffset - srcOffset < initialRegSizeBytes);
while (dstOffset != endDstOffset)
{
if (isSafeToWriteBehind)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (dstOffset + initialRegSizeBytes > endDstOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int srcStartOffset;
int dstStartOffset;
const unsigned byteCount;
};
#endif // TARGET_ARM64
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->gtGetOp1();
}
if (node->IsVolatile())
{
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
InitBlockUnrollHelper helper(dstOffset, size);
regNumber srcReg;
if (!src->isContained())
{
srcReg = genConsumeReg(src);
}
else
{
assert(src->IsIntegralConst(0));
srcReg = REG_ZR;
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
if (!helper.CanEncodeAllOffsets(REGSIZE_BYTES))
{
        // If dstRegAddrAlignment is known and non-zero, the following ensures that the adjusted value of dstReg is
        // at a 16-byte aligned boundary.
// This is done to potentially allow more cases where the JIT can use 16-byte stores.
const int dstOffsetAdjustment = helper.GetDstOffset() - dstRegAddrAlignment;
dstRegAddrAlignment = 0;
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
helper.SetDstOffset(helper.GetDstOffset() - dstOffsetAdjustment);
}
bool shouldUse16ByteWideInstrs = false;
// Store operations that cross a 16-byte boundary reduce bandwidth or incur additional latency.
// The following condition prevents using 16-byte stores when dstRegAddrAlignment is:
// 1) unknown (i.e. dstReg is neither FP nor SP) or
// 2) non-zero (i.e. dstRegAddr is not 16-byte aligned).
const bool hasAvailableSimdReg = isDstRegAddrAlignmentKnown && (size > FP_REGSIZE_BYTES);
const bool canUse16ByteWideInstrs =
hasAvailableSimdReg && (dstRegAddrAlignment == 0) && helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES);
if (canUse16ByteWideInstrs)
{
// The JIT would need to initialize a SIMD register with "movi simdReg.16B, #initValue".
const unsigned instrCount16ByteWide = helper.InstructionCount(FP_REGSIZE_BYTES) + 1;
shouldUse16ByteWideInstrs = instrCount16ByteWide < helper.InstructionCount(REGSIZE_BYTES);
}
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg = node->GetSingleTempReg(RBM_ALLFLOAT);
const int initValue = (src->AsIntCon()->IconValue() & 0xFF);
emit->emitIns_R_I(INS_movi, EA_16BYTE, simdReg, initValue, INS_OPTS_16B);
helper.Unroll(srcReg, simdReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
const regNumber srcReg = genConsumeReg(src);
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, srcReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, srcReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll: Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
srcAddrBaseReg = genConsumeReg(srcAddr->AsAddrMode()->Base());
srcOffset = srcAddr->AsAddrMode()->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
if (node->IsVolatile())
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < INT32_MAX - static_cast<int>(size));
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
CopyBlockUnrollHelper helper(srcOffset, dstOffset, size);
regNumber srcReg = srcAddrBaseReg;
int srcRegAddrAlignment = 0;
bool isSrcRegAddrAlignmentKnown = false;
if (srcLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(srcLclNum, &fpBased);
srcReg = fpBased ? REG_FPBASE : REG_SPBASE;
srcRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isSrcRegAddrAlignmentKnown = true;
helper.SetSrcOffset(baseAddr + srcOffset);
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
helper.TryEncodeAllOffsets(REGSIZE_BYTES, &canEncodeAllLoads, &canEncodeAllStores);
srcOffset = helper.GetSrcOffset();
dstOffset = helper.GetDstOffset();
int srcOffsetAdjustment = 0;
int dstOffsetAdjustment = 0;
if (!canEncodeAllLoads && !canEncodeAllStores)
{
srcOffsetAdjustment = srcOffset;
dstOffsetAdjustment = dstOffset;
}
else if (!canEncodeAllLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else if (!canEncodeAllStores)
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
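    // Illustrative note: if, say, only the loads fail to encode, advancing srcReg by (srcOffset - dstOffset)
    // below makes the remaining source offsets equal to the destination offsets, which are already known to
    // be encodable, at the cost of a single "add" instruction.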
// Quad-word load operations that are not 16-byte aligned, and store operations that cross a 16-byte boundary
// can reduce bandwidth or incur additional latency.
// Therefore, the JIT would attempt to use 16-byte variants of such instructions when both conditions are met:
// 1) the base address stored in dstReg has known alignment (modulo 16 bytes) and
// 2) the base address stored in srcReg has the same alignment as the address in dstReg.
//
// When both addresses are 16-byte aligned the CopyBlock instruction sequence looks like
//
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset]
    // ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+32]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+32]
// ...
//
    // When both addresses are not 16-byte aligned, the CopyBlock instruction sequence starts with a padding
    // load/store pair. For example, when both addresses are 8-byte aligned the instruction sequence looks like
//
// ldr X_intReg1, [srcReg, #srcOffset]
// str X_intReg1, [dstReg, #dstOffset]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+8]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+8]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+40]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+40]
// ...
// LSRA allocates a pair of SIMD registers when alignments of both source and destination base addresses are
// known and the block size is larger than a single SIMD register size (i.e. when using SIMD instructions can
// be profitable).
const bool canUse16ByteWideInstrs = isSrcRegAddrAlignmentKnown && isDstRegAddrAlignmentKnown &&
(size >= 2 * FP_REGSIZE_BYTES) && (srcRegAddrAlignment == dstRegAddrAlignment);
bool shouldUse16ByteWideInstrs = false;
if (canUse16ByteWideInstrs)
{
bool canEncodeAll16ByteWideLoads = false;
bool canEncodeAll16ByteWideStores = false;
helper.TryEncodeAllOffsets(FP_REGSIZE_BYTES, &canEncodeAll16ByteWideLoads, &canEncodeAll16ByteWideStores);
if (canEncodeAll16ByteWideLoads && canEncodeAll16ByteWideStores)
{
// No further adjustments for srcOffset and dstOffset are needed.
            // The JIT should use 16-byte loads and stores when the resulting sequence has fewer instructions.
shouldUse16ByteWideInstrs =
(helper.InstructionCount(FP_REGSIZE_BYTES) < helper.InstructionCount(REGSIZE_BYTES));
}
else if (canEncodeAllLoads && canEncodeAllStores &&
(canEncodeAll16ByteWideLoads || canEncodeAll16ByteWideStores))
{
// In order to use 16-byte instructions the JIT needs to adjust either srcOffset or dstOffset.
            // The JIT should use 16-byte loads and stores when the resulting sequence (incl. an additional add
            // instruction) has fewer instructions.
if (helper.InstructionCount(FP_REGSIZE_BYTES) + 1 < helper.InstructionCount(REGSIZE_BYTES))
{
shouldUse16ByteWideInstrs = true;
if (!canEncodeAll16ByteWideLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
}
}
}
#ifdef DEBUG
if (shouldUse16ByteWideInstrs)
{
assert(helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES));
}
else
{
assert(helper.CanEncodeAllOffsets(REGSIZE_BYTES));
}
#endif
if ((srcOffsetAdjustment != 0) && (dstOffsetAdjustment != 0))
{
const regNumber tempReg1 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg1, srcReg, srcOffsetAdjustment, tempReg1);
srcReg = tempReg1;
const regNumber tempReg2 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg2, dstReg, dstOffsetAdjustment, tempReg2);
dstReg = tempReg2;
}
else if (srcOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, srcReg, srcOffsetAdjustment, tempReg);
srcReg = tempReg;
}
else if (dstOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
}
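// Pick the integer temp registers for the unrolled copy: use the int temps LSRA reserved on the node when
// available, and fall back to the reserved register otherwise.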
regNumber intReg1 = REG_NA;
regNumber intReg2 = REG_NA;
const unsigned intRegCount = node->AvailableTempRegCount(RBM_ALLINT);
if (intRegCount >= 2)
{
intReg1 = node->ExtractTempReg(RBM_ALLINT);
intReg2 = node->ExtractTempReg(RBM_ALLINT);
}
else if (intRegCount == 1)
{
intReg1 = node->GetSingleTempReg(RBM_ALLINT);
intReg2 = rsGetRsvdReg();
}
else
{
intReg1 = rsGetRsvdReg();
}
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg1 = node->ExtractTempReg(RBM_ALLFLOAT);
const regNumber simdReg2 = node->GetSingleTempReg(RBM_ALLFLOAT);
helper.Unroll(FP_REGSIZE_BYTES, intReg1, simdReg1, simdReg2, srcReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(intReg1, intReg2, srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
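// On ARM32 the copy is unrolled with a single integer temp register, halving the access size whenever the
// remaining byte count is smaller than the current access size. For example, a 7-byte copy with
// register-based addressing would emit roughly:
//
//   ldr  rT, [srcReg, #srcOffset]
//   str  rT, [dstReg, #dstOffset]
//   ldrh rT, [srcReg, #srcOffset+4]
//   strh rT, [dstReg, #dstOffset+4]
//   ldrb rT, [srcReg, #srcOffset+6]
//   strb rT, [dstReg, #dstOffset+6]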
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
instruction loadIns;
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
loadIns = INS_ldrb;
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
loadIns = INS_ldrh;
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
loadIns = INS_ldr;
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(loadIns, attr, tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_R_I(loadIns, attr, tempReg, srcAddrBaseReg, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, tempReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
if (node->IsVolatile())
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by means of the VM memset helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
// Destination address goes in arg0, the fill value goes in arg1, and the size goes in arg2.
// genConsumeBlockOp takes care of this for us.
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (initBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile initBlock operation
instGen_MemoryBarrier();
}
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
//------------------------------------------------------------------------
// genCall: Produce code for a GT_CALL node
//
void CodeGen::genCall(GenTreeCall* call)
{
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
assert(curArgTabEntry);
// GT_RELOAD/GT_COPY use the child node
argNode = argNode->gtSkipReloadOrCopy();
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
regNumber argReg = curArgTabEntry->GetRegNum();
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
genConsumeReg(putArgRegNode);
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ true, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
argReg = genRegArgNext(argReg);
#if defined(TARGET_ARM)
// A double register is modelled as an even-numbered single one
if (putArgRegNode->TypeGet() == TYP_DOUBLE)
{
argReg = genRegArgNext(argReg);
}
#endif // TARGET_ARM
}
}
else if (curArgTabEntry->IsSplit())
{
assert(compFeatureArgSplit());
assert(curArgTabEntry->numRegs >= 1);
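// A split struct arg: the first 'numRegs' slots are passed in registers and the remainder on the stack.
// Move each register-resident piece from the register assigned to the PUTARG_SPLIT node into its target
// outgoing argument register.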
genConsumeArgSplitStruct(argNode->AsPutArgSplit());
for (unsigned idx = 0; idx < curArgTabEntry->numRegs; idx++)
{
regNumber argReg = (regNumber)((unsigned)curArgTabEntry->GetRegNum() + idx);
regNumber allocReg = argNode->AsPutArgSplit()->GetRegNumByIdx(idx);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, allocReg, /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
else
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
const regNumber regThis = genGetThisArgReg(call);
#if defined(TARGET_ARM)
const regNumber tmpReg = call->ExtractTempReg();
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
#elif defined(TARGET_ARM64)
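// On ARM64 the null check can load directly into the zero register, so no temp register is needed.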
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
#endif // TARGET*
}
// If fast tail call, then we are done here, we just have to load the call
// target into the right registers. We ensure in RA that target is loaded
// into a volatile register that won't be restored by epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
// Indirect fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
genConsumeReg(target);
}
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);
regNumber tmpReg = call->GetSingleTempReg();
// The register in which we save the call address should not be overridden by the epilog.
assert((tmpReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == tmpReg);
regNumber callAddrReg =
call->IsVirtualStubRelativeIndir() ? compiler->virtualStubParamInfo->GetReg() : REG_R2R_INDIRECT_PARAM;
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), tmpReg, callAddrReg);
// We will use this again when emitting the jump in genCallInstruction in the epilog
call->gtRsvdRegs |= genRegMask(tmpReg);
}
#endif
return;
}
// For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
genCallInstruction(call);
// for pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case it is indirect with CFG enabled make sure we do not get
// the address after the validation but only after the actual call that
// comes after.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
assert(pRetTypeDesc != nullptr);
unsigned regCount = pRetTypeDesc->GetReturnRegCount();
// If regs allocated to call node are different from ABI return
// regs in which the call has returned its result, move the result
// to regs allocated to call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = pRetTypeDesc->GetReturnRegType(i);
returnReg = pRetTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
}
else
{
#ifdef TARGET_ARM
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else if (compiler->opts.compUseSoftFP)
{
returnReg = REG_INTRET;
}
else
#endif // TARGET_ARM
if (varTypeUsesFloatArgReg(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
if (call->GetRegNum() != returnReg)
{
#ifdef TARGET_ARM
if (compiler->opts.compUseSoftFP && returnType == TYP_DOUBLE)
{
inst_RV_RV_RV(INS_vmov_i2d, call->GetRegNum(), returnReg, genRegArgNext(returnReg), EA_8BYTE);
}
else if (compiler->opts.compUseSoftFP && returnType == TYP_FLOAT)
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
else
#endif
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
}
}
genProduceReg(call);
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call)
{
// Determine return value size(s).
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
}
else
{
assert(call->gtType != TYP_STRUCT);
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
DebugInfo di;
// We need to propagate the debug information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
if (call->IsFastTailCall())
{
regMaskTP trashedByEpilog = RBM_CALLEE_SAVED;
// The epilog may use and trash REG_GSCOOKIE_TMP_0/1. Make sure we have no
// non-standard args that may be trashed if this is a tailcall.
if (compiler->getNeedsGSSecurityCookie())
{
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_0);
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_1);
}
for (unsigned i = 0; i < call->fgArgInfo->ArgCount(); i++)
{
fgArgTabEntry* entry = call->fgArgInfo->GetArgEntry(i);
for (unsigned j = 0; j < entry->numRegs; j++)
{
regNumber reg = entry->GetRegNum(j);
if ((trashedByEpilog & genRegMask(reg)) != 0)
{
JITDUMP("Tail call node:\n");
DISPTREE(call);
JITDUMP("Register used: %s\n", getRegName(reg));
assert(!"Argument to tailcall may be trashed by epilog");
}
}
}
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
// A call target can not be a contained indirection
assert(!target->isContainedIndir());
// For fast tailcall we have already consumed the target. We ensure in
// RA that the target was allocated into a volatile register that will
// not be messed up by epilog sequence.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
//
assert(genIsValidIntReg(target->GetRegNum()));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
else
{
// If we have no target and this is a call with indirection cell then
// we do an optimization where we load the call address directly from
// the indirection cell instead of duplicating the tree. In BuildCall
// we ensure that we get an extra register for this purpose. Note that for
// CFG the call might have changed to
// CORINFO_HELP_DISPATCH_INDIRECT_CALL in which case we still have the
// indirection cell but we should not try to optimize.
regNumber callThroughIndirReg = REG_NA;
if (!call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL))
{
callThroughIndirReg = getCallIndirectionCellReg(call);
}
if (callThroughIndirReg != REG_NA)
{
assert(call->IsR2ROrVirtualStubRelativeIndir());
regNumber targetAddrReg = call->GetSingleTempReg();
// For fast tailcalls we have already loaded the call target when processing the call node.
if (!call->IsFastTailCall())
{
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), targetAddrReg,
callThroughIndirReg);
}
else
{
// The register in which we save the call address should not be overridden by the epilog.
assert((targetAddrReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == targetAddrReg);
}
// We have now generated code loading the target address from the indirection cell into `targetAddrReg`.
// We just need to emit "bl targetAddrReg" in this case.
//
assert(genIsValidIntReg(targetAddrReg));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
targetAddrReg,
call->IsFastTailCall());
// clang-format on
}
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
#ifdef FEATURE_READYTORUN
if (call->gtEntryPoint.addr != NULL)
{
assert(call->gtEntryPoint.accessType == IAT_VALUE);
addr = call->gtEntryPoint.addr;
}
else
#endif // FEATURE_READYTORUN
if (call->gtCallType == CT_HELPER)
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct call to known addresses
#ifdef TARGET_ARM
if (!validImmForBL((ssize_t)addr))
{
regNumber tmpReg = call->GetSingleTempReg();
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, tmpReg, (ssize_t)addr);
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
NULL,
retSize,
di,
tmpReg,
call->IsFastTailCall());
// clang-format on
}
else
#endif // TARGET_ARM
{
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting the caller.
// The actual jump to the callee is generated as part of the caller's epilog sequence.
// Therefore the codegen for GT_JMP only needs to ensure that the callee arguments are correctly set up.
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
// First, move any enregistered stack arguments back to the stack.
// At the same time, any reg arg that is not in its correct register is moved back to its stack location.
//
// We are not strictly required to spill reg args that are not in the desired reg for a jmp call
// But that would require us to deal with circularity while moving values around. Spilling
// to stack makes the implementation simple, which is not a bad trade off given Jmp calls
// are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
// Skip reg args that are already in the right register for the jmp call.
// If not, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
continue;
}
else if (varDsc->GetRegNum() == REG_STK)
{
// Skip args that are currently living on the stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(varDsc->IsEnregisterableLcl());
var_types storeType = varDsc->GetStackSlotHomeType();
emitAttr storeSize = emitActualTypeSize(storeType);
#ifdef TARGET_ARM
if (varDsc->TypeGet() == TYP_LONG)
{
// long - at least the low half must be enregistered
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetRegNum(), varNum, 0);
// Is the upper half also enregistered?
if (varDsc->GetOtherReg() != REG_STK)
{
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetOtherReg(), varNum, sizeof(int));
}
}
else
#endif // TARGET_ARM
{
GetEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->GetRegNum(), varNum, 0);
}
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = genRegMask(varDsc->GetRegNum());
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
// Next, load any register arguments that are not currently in registers back into their argument registers.
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
continue;
// Register argument
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
// Is register argument already in the right register?
// If not load it from its stack location.
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
regNumber argRegNext = REG_NA;
#ifdef TARGET_ARM64
if (varDsc->GetRegNum() != argReg)
{
var_types loadType = TYP_UNDEF;
if (varDsc->lvIsHfaRegArg())
{
// Note that for HFA, the argument is currently marked address exposed so lvRegNum will always be
// REG_STK. We home the incoming HFA argument registers in the prolog. Then we'll load them back
// here, whether they are already in the correct registers or not. This is such a corner case that
// it is not worth optimizing it.
assert(!compiler->info.compIsVarArgs);
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned cSlots = varDsc->lvHfaSlots();
for (unsigned ofs = 0, cSlot = 0; cSlot < cSlots; cSlot++, ofs += (unsigned)loadSize)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
assert(genIsValidFloatReg(fieldReg)); // No GC register tracking for floating point registers.
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else
{
if (varTypeIsStruct(varDsc))
{
// Must be <= 16 bytes or else it wouldn't be passed in registers, except for HFA,
// which can be bigger (and is handled above).
noway_assert(EA_SIZE_IN_BYTES(varDsc->lvSize()) <= 16);
loadType = varDsc->GetLayout()->GetGCPtrType(0);
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
}
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of
// the basic block, after which reg life and gc info will be recomputed for the new block
// in genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
// Restore the second register.
argRegNext = genRegArgNext(argReg);
loadType = varDsc->GetLayout()->GetGCPtrType(1);
loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
regSet.AddMaskVars(genRegMask(argRegNext));
gcInfo.gcMarkRegPtrVal(argRegNext, loadType);
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
if (compiler->info.compIsVarArgs)
{
// In case of a jmp call to a vararg method ensure only integer registers are passed.
assert((genRegMask(argReg) & (RBM_ARG_REGS | RBM_ARG_RET_BUFF)) != RBM_NONE);
assert(!varDsc->lvIsHfaRegArg());
fixedIntArgMask |= genRegMask(argReg);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
assert(argRegNext != REG_NA);
fixedIntArgMask |= genRegMask(argRegNext);
}
if (argReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#else // !TARGET_ARM64
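// On ARM32 a TYP_LONG argument, or a TYP_DOUBLE argument passed in integer registers (varargs or soft-FP),
// occupies a pair of consecutive argument registers and is reloaded with two machine-word loads.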
bool twoParts = false;
var_types loadType = TYP_UNDEF;
if (varDsc->TypeGet() == TYP_LONG)
{
twoParts = true;
}
else if (varDsc->TypeGet() == TYP_DOUBLE)
{
if (compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP)
{
twoParts = true;
}
}
if (twoParts)
{
argRegNext = genRegArgNext(argReg);
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, varNum, 0);
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argRegNext, varNum, REGSIZE_BYTES);
}
if (compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
fixedIntArgMask |= genRegMask(argRegNext);
}
}
else if (varDsc->lvIsHfaRegArg())
{
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned maxSize = min(varDsc->lvSize(), (LAST_FP_ARGREG + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += (unsigned)loadSize)
{
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
}
assert(genIsValidFloatReg(fieldReg)); // we don't use register tracking for FP
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else if (varTypeIsStruct(varDsc))
{
regNumber slotReg = argReg;
unsigned maxSize = min(varDsc->lvSize(), (REG_ARG_LAST + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += REGSIZE_BYTES)
{
unsigned idx = ofs / REGSIZE_BYTES;
loadType = varDsc->GetLayout()->GetGCPtrType(idx);
if (varDsc->GetRegNum() != argReg)
{
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, slotReg, varNum, ofs);
}
regSet.AddMaskVars(genRegMask(slotReg));
gcInfo.gcMarkRegPtrVal(slotReg, loadType);
if (genIsValidIntReg(slotReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(slotReg);
}
slotReg = genRegArgNext(slotReg);
}
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
}
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (genIsValidIntReg(argReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
}
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
#endif // !TARGET_ARM64
}
// Jmp call to a vararg method - if the method has fewer fixed (register-sized) arguments than there are
// integer argument registers, load the remaining integer arg registers from the corresponding
// shadow stack slots. This is because we don't know the number and type
// of the non-fixed params passed by the caller, therefore we have to assume the worst case:
// the caller passed values in all of the integer arg registers.
//
// The caller could have passed gc-ref/byref type var args. Since these are var args
// the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
// remaining arg registers from shadow stack slots as non-gc interruptible.
if (fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, firstArgVarNum, argOffset);
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
}
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
GetEmitter()->emitIns_R_I(INS_cmp, EA_ATTR(desc.CheckSrcSize()), reg, 0);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 32 bits are zero.
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF00000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
// We need to check if the value is not greater than 0x7FFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 33 bits are zero.
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF80000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
{
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MAX);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_gt, SCK_OVERFLOW);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MIN);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
// Values greater than 255 cannot be encoded in the immediate operand of CMP.
// Replace (x > max) with (x >= max + 1) where max + 1 (a power of 2) can be
// encoded. We could do this for all max values but on ARM32 "cmp r0, 255"
// is better than "cmp r0, 256" because it has a shorter encoding.
if (castMaxValue > 255)
{
assert((castMaxValue == 32767) || (castMaxValue == 65535));
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue + 1);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hs : EJ_ge, SCK_OVERFLOW);
}
else
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hi : EJ_gt, SCK_OVERFLOW);
}
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
//
// TODO-ARM64-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
if ((desc.ExtendKind() != GenIntCastDesc::COPY) || (srcReg != dstReg))
{
instruction ins;
unsigned insSize;
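// Map the extension kind and source size computed by GenIntCastDesc to the instruction and operand size.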
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_uxtb : INS_uxth;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_sxtb : INS_sxth;
insSize = 4;
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_sxtw;
insSize = 8;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
break;
}
GetEmitter()->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, /* canSkip */ false);
}
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
// treeNode must be a reg
assert(!treeNode->isContained());
#if defined(TARGET_ARM)
if (srcType != dstType)
{
instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
: INS_vcvt_d2f; // convert Double to Float
GetEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum());
}
else
{
GetEmitter()->emitIns_Mov(INS_vmov, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#elif defined(TARGET_ARM64)
if (srcType != dstType)
{
insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
: INS_OPTS_D_TO_S; // convert Double to Single
GetEmitter()->emitIns_R_R(INS_fcvt, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
cvtOption);
}
else
{
// If double to double cast or float to float cast. Emit a move instruction.
GetEmitter()->emitIns_Mov(INS_mov, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#endif // TARGET*
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCreateAndStoreGCInfo: Create and record GC Info for the function.
//
void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder != nullptr);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
#ifdef TARGET_ARM64
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
preservedAreaSize += REGSIZE_BYTES;
preservedAreaSize += 1; // bool for synchronized methods
}
// Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
// frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
#endif // TARGET_ARM64
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
// let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
// clang-format off
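// Maps each GenCondition to one or two emitter jump kinds. Rows with two jump kinds (e.g. FNE, FEQU)
// describe conditions that require a pair of checks combined according to the row's oper field; see
// inst_SETCC below for how the pair is materialized.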
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_lt }, // SLT
{ EJ_le }, // SLE
{ EJ_ge }, // SGE
{ EJ_gt }, // SGT
{ EJ_mi }, // S
{ EJ_pl }, // NS
{ EJ_eq }, // EQ
{ EJ_ne }, // NE
{ EJ_lo }, // ULT
{ EJ_ls }, // ULE
{ EJ_hs }, // UGE
{ EJ_hi }, // UGT
{ EJ_hs }, // C
{ EJ_lo }, // NC
{ EJ_eq }, // FEQ
{ EJ_gt, GT_OR, EJ_lo }, // FNE
{ EJ_lo }, // FLT
{ EJ_ls }, // FLE
{ EJ_ge }, // FGE
{ EJ_gt }, // FGT
{ EJ_vs }, // O
{ EJ_vc }, // NO
{ EJ_eq, GT_OR, EJ_vs }, // FEQU
{ EJ_ne }, // FNEU
{ EJ_lt }, // FLTU
{ EJ_le }, // FLEU
{ EJ_hs }, // FGEU
{ EJ_hi }, // FGTU
{ }, // P
{ }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg));
#ifdef TARGET_ARM64
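// Use cset for the first condition; when the condition maps to a pair of condition codes, either skip or
// fall through to a second cset depending on the combining operator. For example, for FEQU
// ({ EJ_eq, GT_OR, EJ_vs }) this emits roughly:
//
//   cset rD, eq
//   b.eq Next       ; result is already 1
//   cset rD, vs
// Next: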
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
#else
// Emit code like this:
// ...
// bgt True
// movs rD, #0
// b Next
// True:
// movs rD, #1
// Next:
// ...
BasicBlock* labelTrue = genCreateTempLabel();
inst_JCC(condition, labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 0);
BasicBlock* labelNext = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_b, labelNext);
genDefineTempLabel(labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 1);
genDefineTempLabel(labelNext);
#endif
}
//------------------------------------------------------------------------
// genCodeForStoreBlk: Produce code for a GT_STORE_OBJ/GT_STORE_DYN_BLK/GT_STORE_BLK node.
//
// Arguments:
// blkOp - the node
//
void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
{
assert(blkOp->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (blkOp->OperIs(GT_STORE_OBJ))
{
assert(!blkOp->gtBlkOpGcUnsafe);
assert(blkOp->OperIsCopyBlkOp());
assert(blkOp->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(blkOp->AsObj());
return;
}
bool isCopyBlk = blkOp->OperIsCopyBlkOp();
switch (blkOp->gtBlkOpKind)
{
case GenTreeBlk::BlkOpKindHelper:
assert(!blkOp->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(blkOp);
}
else
{
genCodeForInitBlkHelper(blkOp);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
genCodeForCpBlkUnroll(blkOp);
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
}
else
{
assert(!blkOp->gtBlkOpGcUnsafe);
genCodeForInitBlkUnroll(blkOp);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genScaledAdd: A helper for genLeaInstruction.
//
void CodeGen::genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale)
{
emitter* emit = GetEmitter();
if (scale == 0)
{
// target = base + index
emit->emitIns_R_R_R(INS_add, attr, targetReg, baseReg, indexReg);
}
else
{
// target = base + index<<scale
#if defined(TARGET_ARM)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
#elif defined(TARGET_ARM64)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_OPTS_LSL);
#endif
}
}
//------------------------------------------------------------------------
// genCodeForMulLong: Generates code for int*int->long multiplication.
//
// Arguments:
// mul - the GT_MUL_LONG node
//
// Return Value:
// None.
//
void CodeGen::genCodeForMulLong(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL_LONG));
genConsumeOperands(mul);
regNumber srcReg1 = mul->gtGetOp1()->GetRegNum();
regNumber srcReg2 = mul->gtGetOp2()->GetRegNum();
instruction ins = mul->IsUnsigned() ? INS_umull : INS_smull;
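// On ARM32 the 64-bit product is written to a register pair (low half in GetRegNum(), high half in
// gtOtherReg); on ARM64 [u|s]mull writes the full 64-bit product into a single register.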
#ifdef TARGET_ARM
GetEmitter()->emitIns_R_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), mul->AsMultiRegOp()->gtOtherReg, srcReg1, srcReg2);
#else
GetEmitter()->emitIns_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), srcReg1, srcReg2);
#endif
genProduceReg(mul);
}
//------------------------------------------------------------------------
// genLeaInstruction: Produce code for a GT_LEA node.
//
// Arguments:
// lea - the node
//
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
genConsumeOperands(lea);
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(lea);
int offset = lea->Offset();
// In ARM we can only load addresses of the form:
//
// [Base + index*scale]
// [Base + Offset]
// [Literal] (PC-Relative)
//
// So for the case of a LEA node of the form [Base + Index*Scale + Offset] we will generate:
// destReg = baseReg + indexReg * scale;
// destReg = destReg + offset;
//
// TODO-ARM64-CQ: The purpose of the GT_LEA node is to directly reflect a single target architecture
// addressing mode instruction. Currently we're 'cheating' by producing one or more
// instructions to generate the addressing mode so we need to modify lowering to
// produce LEAs that are a 1:1 relationship to the ARM64 architecture.
if (lea->Base() && lea->Index())
{
GenTree* memBase = lea->Base();
GenTree* index = lea->Index();
DWORD scale;
assert(isPow2(lea->gtScale));
BitScanForward(&scale, lea->gtScale);
assert(scale <= 4);
if (offset != 0)
{
regNumber tmpReg = lea->GetSingleTempReg();
// When generating fully interruptible code we have to use the "large offset" sequence
// when calculating an EA_BYREF address, as we can't report a byref that points outside of the object
//
bool useLargeOffsetSeq = compiler->GetInterruptible() && (size == EA_BYREF);
if (!useLargeOffsetSeq && emitter::emitIns_valid_imm_for_add(offset))
{
// Generate code to set tmpReg = base + index*scale
genScaledAdd(size, tmpReg, memBase->GetRegNum(), index->GetRegNum(), scale);
// Then compute target reg from [tmpReg + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), tmpReg, offset);
}
else // large offset sequence
{
noway_assert(tmpReg != index->GetRegNum());
noway_assert(tmpReg != memBase->GetRegNum());
// First load/store tmpReg with the offset constant
// rTmp = imm
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then add the scaled index register
// rTmp = rTmp + index*scale
genScaledAdd(EA_PTRSIZE, tmpReg, tmpReg, index->GetRegNum(), scale);
// Then compute target reg from [base + tmpReg ]
// rDst = base + rTmp
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else
{
// Then compute target reg from [base + index*scale]
genScaledAdd(size, lea->GetRegNum(), memBase->GetRegNum(), index->GetRegNum(), scale);
}
}
else if (lea->Base())
{
GenTree* memBase = lea->Base();
if (emitter::emitIns_valid_imm_for_add(offset))
{
if (offset != 0)
{
// Then compute target reg from [memBase + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), offset);
}
else // offset is zero
{
emit->emitIns_Mov(INS_mov, size, lea->GetRegNum(), memBase->GetRegNum(), /* canSkip */ true);
}
}
else
{
// We require a tmpReg to hold the offset
regNumber tmpReg = lea->GetSingleTempReg();
// First load tmpReg with the large offset constant
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then compute target reg from [memBase + tmpReg]
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else if (lea->Index())
{
// If we encounter a GT_LEA node without a base it means it came out
// when attempting to optimize an arbitrary arithmetic expression during lower.
// This is currently disabled in ARM64 since we need to adjust lower to account
// for the simpler instructions ARM64 supports.
// TODO-ARM64-CQ: Fix this and let LEA optimize arithmetic trees too.
assert(!"We shouldn't see a baseless address computation during CodeGen for ARM64");
}
genProduceReg(lea);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
regNumber srcReg = src->GetRegNum();
// Treat the src register as a homogeneous vector with element size equal to the reg size
// Insert pieces in order
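// For example, for a SIMD value returned as an HFA of two floats and held in vector register vSrc,
// this would emit roughly:
//
//   mov v0.s[0], vSrc.s[0]
//   mov v1.s[0], vSrc.s[1]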
unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
regNumber reg = retTypeDesc->GetABIReturnReg(i);
if (varTypeIsFloating(type))
{
// If the register piece is to be passed in a floating point register
// Use a vector mov element instruction
// reg is not a vector, so it is in the first element reg[0]
// mov reg[0], src[i]
// This effectively moves from `src[i]` to `reg[0]`, upper bits of reg remain unchanged
// For the case where src == reg, since we are only writing reg[0], as long as we iterate
// so that src[0] is consumed before writing reg[0], we do not need a temporary.
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), reg, srcReg, 0, i);
}
else
{
// If the register piece is to be passed in an integer register
// Use a vector mov to general purpose register instruction
// mov reg, src[i]
// This effectively moves from `src[i]` to `reg`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), reg, srcReg, i);
}
}
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
// Arguments (arm64):
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
#if defined(TARGET_ARM64)
void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed)
#else
void CodeGen::genPushCalleeSavedRegisters()
#endif
{
assert(compiler->compGeneratingProlog);
#ifdef TARGET_ARM64
// Probe large frames now, if necessary, since genPushCalleeSavedRegisters() will allocate the frame. Note that
// for arm64, genAllocLclFrame only probes the frame; it does not actually allocate it (it does not change SP).
// For arm64, we are probing the frame before the callee-saved registers are saved. The 'initReg' might have
// been calculated to be one of the callee-saved registers (say, if all the integer argument registers are
// in use, and perhaps with other conditions being satisfied). This is ok in other cases, after the callee-saved
// registers have been saved. So instead of letting genAllocLclFrame use initReg as a temporary register,
// always use REG_SCRATCH. We don't care if it trashes it, so ignore the initRegZeroed output argument.
bool ignoreInitRegZeroed = false;
genAllocLclFrame(compiler->compLclFrameSize, REG_SCRATCH, &ignoreInitRegZeroed,
intRegState.rsCalleeRegArgMaskLiveIn);
#endif
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On ARM we push the FP (frame-pointer) here along with all other callee saved registers
if (isFramePointerUsed())
rsPushRegs |= RBM_FPBASE;
//
// It may be possible to skip pushing/popping lr for leaf methods. However, such optimization would require
// changes in GC suspension architecture.
//
// We would need to guarantee that a tight loop calling a virtual leaf method can be suspended for GC. Today, we
// generate partially interruptible code for both the method that contains the tight loop with the call and the leaf
// method. GC suspension depends on return address hijacking in this case. Return address hijacking depends
// on the return address to be saved on the stack. If we skipped pushing/popping lr, the return address would never
// be saved on the stack and the GC suspension would time out.
//
// So if we wanted to skip pushing/popping lr for leaf frames, we would also need to do one of
// the following to make GC suspension work in the above scenario:
// - Make return address hijacking work even when lr is not saved on the stack.
// - Generate fully interruptible code for loops that contains calls
// - Generate fully interruptible code for leaf methods
//
// Given the limited benefit from this optimization (<10k for CoreLib NGen image), the extra complexity
// is not worth it.
//
rsPushRegs |= RBM_LR; // We must save the return address (in the LR register)
regSet.rsMaskCalleeSaved = rsPushRegs;
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
#if defined(TARGET_ARM)
regMaskTP maskPushRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = rsPushRegs & ~maskPushRegsFloat;
maskPushRegsInt |= genStackAllocRegisterMask(compiler->compLclFrameSize, maskPushRegsFloat);
assert(FitsIn<int>(maskPushRegsInt));
inst_IV(INS_push, (int)maskPushRegsInt);
compiler->unwindPushMaskInt(maskPushRegsInt);
if (maskPushRegsFloat != 0)
{
genPushFltRegs(maskPushRegsFloat);
compiler->unwindPushMaskFloat(maskPushRegsFloat);
}
#elif defined(TARGET_ARM64)
// See the document "ARM64 JIT Frame Layout" and/or "ARM64 Exception Data" for more details or requirements and
// options. Case numbers in comments here refer to this document. See also Compiler::lvaAssignFrameOffsets()
// for pictures of the general frame layouts, and CodeGen::genFuncletProlog() implementations (per architecture)
// for pictures of the funclet frame layouts.
//
// For most frames, generate, e.g.:
// stp fp, lr, [sp,-0x80]! // predecrement SP with full frame size, and store FP/LR pair.
// stp r19, r20, [sp, 0x60] // store at positive offset from SP established above, into callee-saved area
// // at top of frame (highest addresses).
// stp r21, r22, [sp, 0x70]
//
// Notes:
// 1. We don't always need to save FP. If FP isn't saved, then LR is saved with the other callee-saved registers
// at the top of the frame.
// 2. If we save FP, then the first store is FP, LR.
// 3. General-purpose registers are 8 bytes, floating-point registers are 16 bytes, but FP/SIMD registers only
// preserve their lower 8 bytes, by calling convention.
// 4. For frames with varargs, we spill the integer register arguments to the stack, so all the arguments are
// consecutive, and at the top of the frame.
// 5. We allocate the frame here; no further changes to SP are allowed (except in the body, for localloc).
//
// For functions with GS and localloc, we change the frame so the frame pointer and LR are saved at the top
// of the frame, just under the varargs registers (if any). Note that the funclet frames must follow the same
// rule, and both main frame and funclet frames (if any) must put PSPSym in the same offset from Caller-SP.
// Since this frame type is relatively rare, we force using it via stress modes, for additional coverage.
//
// The frames look like the following (simplified to only include components that matter for establishing the
// frames). See also Compiler::lvaAssignFrameOffsets().
//
// Frames with FP, LR saved at bottom of frame (above outgoing argument space):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
// Frames with FP, LR saved at top of frame (below saved varargs incoming arguments):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
int totalFrameSize = genTotalFrameSize();
int offset; // This will be the starting place for saving the callee-saved registers, in increasing order.
regMaskTP maskSaveRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = rsPushRegs & ~maskSaveRegsFloat;
#ifdef DEBUG
if (verbose)
{
printf("Save float regs: ");
dspRegMask(maskSaveRegsFloat);
printf("\n");
printf("Save int regs: ");
dspRegMask(maskSaveRegsInt);
printf("\n");
}
#endif // DEBUG
// The frameType number is arbitrary, is defined below, and corresponds to one of the frame styles we
// generate based on various sizes.
int frameType = 0;
// The amount to subtract from SP before starting to store the callee-saved registers. It might be folded into the
// first save instruction as a "predecrement" amount, if possible.
int calleeSaveSpDelta = 0;
if (isFramePointerUsed())
{
// We need to save both FP and LR.
assert((maskSaveRegsInt & RBM_FP) != 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// If we need to generate a GS cookie, we need to make sure the saved frame pointer and return address
// (FP and LR) are protected from buffer overrun by the GS cookie. If FP/LR are at the lowest addresses,
// then they are safe, since they are lower than any unsafe buffers. And the GS cookie we add will
// protect our caller's frame. If we have a localloc, however, that is dynamically placed lower than our
// saved FP/LR. In that case, we save FP/LR along with the rest of the callee-saved registers, above
// the GS cookie.
//
// After the frame is allocated, the frame pointer is established, pointing at the saved frame pointer to
// create a frame pointer chain.
//
// Do we need another frame pointer register to get good code quality in the case of having the frame pointer
// point high in the frame, so we can take advantage of arm64's preference for positive offsets? C++ native
// code dedicates callee-saved x19 to this, so generates:
// mov x19, sp
// in the prolog, then uses x19 for local var accesses. Given that this case is so rare, we currently do
// not do this. That means that negative offsets from FP might need to use the reserved register to form
// the local variable offset for an addressing mode.
if (((compiler->lvaOutgoingArgSpaceSize == 0) && (totalFrameSize <= 504)) &&
!genSaveFpLrWithAllCalleeSavedRegisters)
{
// Case #1.
//
// Generate:
// stp fp,lr,[sp,#-framesz]!
//
// The (totalFrameSize <= 504) condition ensures that both the pre-index STP instruction
// used in the prolog, and the post-index LDP instruction used in the epilog, can be generated.
// Note that STP and the unwind codes can handle -512, but LDP with a positive post-index value
// can only handle up to 504, and we want our prolog and epilog to match.
//
// After saving callee-saved registers, we establish the frame pointer with:
// mov fp,sp
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
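            //
            // For example (illustrative), with totalFrameSize = 32:
            //   stp fp,lr,[sp,#-32]!          // prolog: allocate the frame and save FP/LR
            //   ...any remaining callee-saved register saves...
            //   mov fp,sp                     // establish the frame pointer
            // and the matching epilog ends with:
            //   ldp fp,lr,[sp],#32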
JITDUMP("Frame type 1. #outsz=0; #framesz=%d; LclFrameSize=%d\n", totalFrameSize,
compiler->compLclFrameSize);
frameType = 1;
assert(totalFrameSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
else if (totalFrameSize <= 512)
{
// Case #2.
//
// The (totalFrameSize <= 512) condition ensures the callee-saved registers can all be saved using STP
// with signed offset encoding. The maximum positive STP offset is 504, but when storing a pair of
// 8 byte registers, the largest actual offset we use would be 512 - 8 * 2 = 496. And STR with positive
// offset has a range 0 to 32760.
//
// After saving callee-saved registers, we establish the frame pointer with:
// add fp,sp,#outsz
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
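            //
            // For example (illustrative), with #framesz=96 and #outsz=32, frame type 2 emits:
            //   sub sp,sp,#96
            //   stp fp,lr,[sp,#32]
            //   ...remaining callee-saved register saves above FP/LR...
            //   add fp,sp,#32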
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 4 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 4;
// The frame will be allocated below, when the callee-saved registers are saved. This might mean a
// separate SUB instruction or the SP adjustment might be folded in to the first STP if there is
// no outgoing argument space AND no local frame space, that is, if the only thing the frame does
// is save callee-saved registers (and possibly varargs argument registers).
calleeSaveSpDelta = totalFrameSize;
offset = (int)compiler->compLclFrameSize;
}
else
{
JITDUMP("Frame type 2 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 2;
// Generate:
// sub sp,sp,#framesz
// stp fp,lr,[sp,#outsz] // note that by necessity, #outsz <= #framesz - 16, so #outsz <= 496.
assert(totalFrameSize - compiler->lvaOutgoingArgSpaceSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
assert(compiler->lvaOutgoingArgSpaceSize + 2 * REGSIZE_BYTES <= (unsigned)totalFrameSize);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
}
else
{
// Case 5 or 6.
//
// First, the callee-saved registers will be saved, and the callee-saved register code must use
// pre-index to subtract from SP as the first instruction. It must also leave space for varargs
// registers to be stored. For example:
// stp r19,r20,[sp,#-96]!
// stp d8,d9,[sp,#16]
// ... save varargs incoming integer registers ...
// Note that all SP alterations must be 16-byte aligned. We have already calculated any alignment to be
// lower on the stack than the callee-saved registers (see lvaAlignFrame() for how we calculate
// alignment). So, if there is an odd number of callee-saved registers, we use (for example, with just
// one saved register):
// sub sp,sp,#16
// str r19,[sp,#8]
// This is one additional instruction, but it centralizes the aligned space. Otherwise, it might be
// possible to have two 8-byte alignment padding words, one below the callee-saved registers, and one
// above them. If that is preferable, we could implement it.
//
// Note that any varargs saved space will always be 16-byte aligned, since there are 8 argument
// registers.
//
// Then, define #remainingFrameSz = #framesz - (callee-saved size + varargs space + possible alignment
// padding from above). Note that #remainingFrameSz must not be zero, since we still need to save FP,SP.
//
// Generate:
// sub sp,sp,#remainingFrameSz
// or, for large frames:
// mov rX, #remainingFrameSz // maybe multiple instructions
// sub sp,sp,rX
//
// followed by:
// stp fp,lr,[sp,#outsz]
// add fp,sp,#outsz
//
// However, we need to handle the case where #outsz is larger than the constant signed offset encoding
// can handle. And, once again, we might need to deal with #outsz that is not aligned to 16-bytes (i.e.,
// STACK_ALIGN). So, in the case of large #outsz we will have an additional SP adjustment, using one of
// the following sequences:
//
// Define #remainingFrameSz2 = #remainingFrameSz - #outsz.
//
// sub sp,sp,#remainingFrameSz2 // if #remainingFrameSz2 is 16-byte aligned
// stp fp,lr,[sp]
// mov fp,sp
// sub sp,sp,#outsz // in this case, #outsz must also be 16-byte aligned
//
// Or:
//
// sub sp,sp,roundUp(#remainingFrameSz2,16) // if #remainingFrameSz2 is not 16-byte aligned (it is
// // always guaranteed to be 8 byte aligned).
// stp fp,lr,[sp,#8] // it will always be #8 in the unaligned case
// add fp,sp,#8
// sub sp,sp,#outsz - #8
//
// (As usual, for a large constant "#outsz - #8", we might need multiple instructions:
// mov rX, #outsz - #8 // maybe multiple instructions
// sub sp,sp,rX
// )
//
// Note that even if we align the SP alterations, that does not imply that we are creating empty alignment
// slots. In fact, we are not; any empty alignment slots were calculated in
// Compiler::lvaAssignFrameOffsets() and its callees.
int calleeSaveSpDeltaUnaligned = totalFrameSize - compiler->compLclFrameSize;
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 5 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
// This case is much simpler, because we allocate space for the callee-saved register area, including
// FP/LR. Note the SP adjustment might be SUB or be folded into the first store as a predecrement.
// Then, we use a single SUB to establish the rest of the frame. We need to be careful about where
// to establish the frame pointer, as there is a limit of 2040 bytes offset from SP to FP in the
// unwind codes when FP is established.
frameType = 5;
}
else
{
JITDUMP("Frame type 3 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 3;
calleeSaveSpDeltaUnaligned -= 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll save later.
// We'll take care of these later, but callee-saved regs code shouldn't see them.
maskSaveRegsInt &= ~(RBM_FP | RBM_LR);
}
assert(calleeSaveSpDeltaUnaligned >= 0);
            assert((calleeSaveSpDeltaUnaligned % 8) == 0); // It had better at least be 8-byte aligned.
calleeSaveSpDelta = AlignUp((UINT)calleeSaveSpDeltaUnaligned, STACK_ALIGN);
offset = calleeSaveSpDelta - calleeSaveSpDeltaUnaligned;
JITDUMP(" calleeSaveSpDelta=%d, offset=%d\n", calleeSaveSpDelta, offset);
// At most one alignment slot between SP and where we store the callee-saved registers.
assert((offset == 0) || (offset == REGSIZE_BYTES));
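            //
            // For example (illustrative), for frame type 3 with totalFrameSize=1088 and LclFrameSize=984:
            // calleeSaveSpDeltaUnaligned = 1088 - 984 - 16 = 88 (the 16 is for FP/LR, saved later),
            // calleeSaveSpDelta = AlignUp(88, 16) = 96, and offset = 8 (one alignment slot).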
}
}
else
{
// No frame pointer (no chaining).
assert((maskSaveRegsInt & RBM_FP) == 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// Note that there is no pre-indexed save_lrpair unwind code variant, so we can't allocate the frame using
// 'stp' if we only have one callee-saved register plus LR to save.
NYI("Frame without frame pointer");
offset = 0;
}
assert(frameType != 0);
const int calleeSaveSpOffset = offset;
JITDUMP(" offset=%d, calleeSaveSpDelta=%d\n", offset, calleeSaveSpDelta);
genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, offset, -calleeSaveSpDelta);
offset += genCountBits(maskSaveRegsInt | maskSaveRegsFloat) * REGSIZE_BYTES;
// For varargs, home the incoming arg registers last. Note that there is nothing to unwind here,
// so we just report "NOP" unwind codes. If there's no more frame setup after this, we don't
// need to add codes at all.
if (compiler->info.compIsVarArgs)
{
JITDUMP(" compIsVarArgs=true\n");
// There are 8 general-purpose registers to home, thus 'offset' must be 16-byte aligned here.
assert((offset % 16) == 0);
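        // For example (illustrative), this homes x0-x7 with four stores:
        //   stp x0,x1,[sp,#offset]
        //   stp x2,x3,[sp,#offset+16]
        //   stp x4,x5,[sp,#offset+32]
        //   stp x6,x7,[sp,#offset+48]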
for (regNumber reg1 = REG_ARG_FIRST; reg1 < REG_ARG_LAST; reg1 = REG_NEXT(REG_NEXT(reg1)))
{
regNumber reg2 = REG_NEXT(reg1);
// stp REG, REG + 1, [SP, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, offset);
compiler->unwindNop();
offset += 2 * REGSIZE_BYTES;
}
}
// By default, we'll establish the frame pointer chain. (Note that currently frames without FP are NYI.)
bool establishFramePointer = true;
// If we do establish the frame pointer, what is the amount we add to SP to do so?
unsigned offsetSpToSavedFp = 0;
if (frameType == 1)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
assert(offsetSpToSavedFp == 0);
}
else if (frameType == 2)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
else if (frameType == 3)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
if (compiler->lvaOutgoingArgSpaceSize > 504)
{
// We can't do "stp fp,lr,[sp,#outsz]" because #outsz is too big.
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
int spAdjustment2 = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == 8));
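            //
            // For example (illustrative), with remainingFrameSz=1040 and #outsz=536:
            // spAdjustment2Unaligned = 504, spAdjustment2 = 512, alignmentAdjustment2 = 8, so we emit
            //   sub sp,sp,#512 / stp fp,lr,[sp,#8] / add fp,sp,#8 / sub sp,sp,#528 (spAdjustment3).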
JITDUMP(" spAdjustment2=%d\n", spAdjustment2);
genPrologSaveRegPair(REG_FP, REG_LR, alignmentAdjustment2, -spAdjustment2, false, initReg, pInitRegZeroed);
offset += spAdjustment2;
// Now subtract off the #outsz (or the rest of the #outsz if it was unaligned, and the above "sub"
// included some of it)
int spAdjustment3 = compiler->lvaOutgoingArgSpaceSize - alignmentAdjustment2;
assert(spAdjustment3 > 0);
assert((spAdjustment3 % 16) == 0);
JITDUMP(" alignmentAdjustment2=%d\n", alignmentAdjustment2);
genEstablishFramePointer(alignmentAdjustment2, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
JITDUMP(" spAdjustment3=%d\n", spAdjustment3);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind
// info.
genStackPointerAdjustment(-spAdjustment3, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += spAdjustment3;
}
else
{
genPrologSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, -remainingFrameSz, false, initReg,
pInitRegZeroed);
offset += remainingFrameSz;
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
}
else if (frameType == 4)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
}
else if (frameType == 5)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
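        // For example (illustrative): with calleeSaveSpDelta=96 and no varargs, FP/LR sit at [sp,#80],
        // so offsetSpToSavedFp=80 and the frame pointer is established with "add fp,sp,#80".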
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
JITDUMP(" remainingFrameSz=%d\n", remainingFrameSz);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind info.
genStackPointerAdjustment(-remainingFrameSz, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += remainingFrameSz;
}
else
{
unreached();
}
if (establishFramePointer)
{
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
}
assert(offset == totalFrameSize);
// Save off information about the frame for later use
//
compiler->compFrameInfo.frameType = frameType;
compiler->compFrameInfo.calleeSaveSpOffset = calleeSaveSpOffset;
compiler->compFrameInfo.calleeSaveSpDelta = calleeSaveSpDelta;
compiler->compFrameInfo.offsetSpToSavedFp = offsetSpToSavedFp;
#endif // TARGET_ARM64
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFnEpilog()\n");
#endif // DEBUG
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
#ifdef DEBUG
if (compiler->opts.dspCode)
printf("\n__epilog:\n");
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif // DEBUG
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
GenTree* lastNode = block->lastNode();
// Method handle and address info used in case of jump epilog
CORINFO_METHOD_HANDLE methHnd = nullptr;
CORINFO_CONST_LOOKUP addrInfo;
addrInfo.addr = nullptr;
addrInfo.accessType = IAT_VALUE;
if (jmpEpilog && lastNode->gtOper == GT_JMP)
{
methHnd = (CORINFO_METHOD_HANDLE)lastNode->AsVal()->gtVal1;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
}
#ifdef TARGET_ARM
// We delay starting the unwind codes until we have an instruction which we know
// needs an unwind code. In particular, for large stack frames in methods without
// localloc, the sequence might look something like this:
// movw r3, 0x38e0
// add sp, r3
// pop {r4,r5,r6,r10,r11,pc}
// In this case, the "movw" should not be part of the unwind codes, since it will
// be a NOP, and it is a waste to start with a NOP. Note that calling unwindBegEpilog()
// also sets the current location as the beginning offset of the epilog, so every
// instruction afterwards needs an unwind code. In the case above, if you call
// unwindBegEpilog() before the "movw", then you must generate a NOP for the "movw".
bool unwindStarted = false;
// Tear down the stack frame
if (compiler->compLocallocUsed)
{
if (!unwindStarted)
{
compiler->unwindBegEpilog();
unwindStarted = true;
}
// mov R9 into SP
inst_Mov(TYP_I_IMPL, REG_SP, REG_SAVED_LOCALLOC_SP, /* canSkip */ false);
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
if (jmpEpilog ||
genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) ==
RBM_NONE)
{
genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted);
}
if (!unwindStarted)
{
// If we haven't generated anything yet, we're certainly going to generate a "pop" next.
compiler->unwindBegEpilog();
unwindStarted = true;
}
if (jmpEpilog && lastNode->gtOper == GT_JMP && addrInfo.accessType == IAT_RELPVALUE)
{
// IAT_RELPVALUE jump at the end is done using relative indirection, so,
// additional helper register is required.
// We use LR just before it is going to be restored from stack, i.e.
//
// movw r12, laddr
// movt r12, haddr
// mov lr, r12
// ldr r12, [r12]
// add r12, r12, lr
// pop {lr}
// ...
// bx r12
regNumber indCallReg = REG_R12;
regNumber vptrReg1 = REG_LR;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, vptrReg1, indCallReg, /* canSkip */ false);
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, indCallReg, vptrReg1);
}
genPopCalleeSavedRegisters(jmpEpilog);
if (regSet.rsMaskPreSpillRegs(true) != RBM_NONE)
{
// We better not have used a pop PC to return otherwise this will be unreachable code
noway_assert(!genUsedPopToReturn);
int preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
inst_RV_IV(INS_add, REG_SPBASE, preSpillRegArgSize, EA_PTRSIZE);
compiler->unwindAllocStack(preSpillRegArgSize);
}
if (jmpEpilog)
{
// We better not have used a pop PC to return otherwise this will be unreachable code
noway_assert(!genUsedPopToReturn);
}
#else // TARGET_ARM64
compiler->unwindBegEpilog();
genPopCalleeSavedRegistersAndFreeLclFrame(jmpEpilog);
#endif // TARGET_ARM64
if (jmpEpilog)
{
SetHasTailCalls(true);
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode() != nullptr);
/* figure out what jump we have */
GenTree* jmpNode = lastNode;
#if !FEATURE_FASTTAILCALL
noway_assert(jmpNode->gtOper == GT_JMP);
#else // FEATURE_FASTTAILCALL
// armarch
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif // FEATURE_FASTTAILCALL
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
assert(methHnd != nullptr);
assert(addrInfo.addr != nullptr);
#ifdef TARGET_ARMARCH
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
switch (addrInfo.accessType)
{
case IAT_VALUE:
if (validImmForBL((ssize_t)addrInfo.addr))
{
// Simple direct call
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
break;
}
// otherwise the target address doesn't fit in an immediate
// so we have to burn a register...
FALLTHROUGH;
case IAT_PVALUE:
// Load the address into a register, load indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
callType = emitter::EC_INDIR_R;
indCallReg = REG_INDIRECT_CALL_TARGET_REG;
addr = NULL;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
if (addrInfo.accessType == IAT_PVALUE)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
regSet.verifyRegUsed(indCallReg);
}
break;
case IAT_RELPVALUE:
{
// Load the address into a register, load relative indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
// LR is used as helper register right before it is restored from stack, thus,
// all relative address calculations are performed before LR is restored.
callType = emitter::EC_INDIR_R;
indCallReg = REG_R12;
addr = NULL;
regSet.verifyRegUsed(indCallReg);
break;
}
case IAT_PPVALUE:
default:
NO_WAY("Unsupported JMP indirection");
}
/* Simply emit a jump to the methodHnd. This is similar to a call so we can use
* the same descriptor with some minor adjustments.
*/
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN, // retSize
#if defined(TARGET_ARM64)
EA_UNKNOWN, // secondRetSize
#endif
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, // ireg
REG_NA, // xreg
0, // xmul
0, // disp
true); // isJump
// clang-format on
CLANG_FORMAT_COMMENT_ANCHOR;
#endif // TARGET_ARMARCH
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
#ifdef TARGET_ARM
if (!genUsedPopToReturn)
{
// If we did not use a pop to return, then we did a "pop {..., lr}" instead of "pop {..., pc}",
// so we need a "bx lr" instruction to return from the function.
inst_RV(INS_bx, REG_LR, TYP_I_IMPL);
compiler->unwindBranch16();
}
#else // TARGET_ARM64
inst_RV(INS_ret, REG_LR, TYP_I_IMPL);
compiler->unwindReturn(REG_LR);
#endif // TARGET_ARM64
}
compiler->unwindEndEpilog();
}
// return size
// alignmentWB is out param
unsigned CodeGenInterface::InferOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
if (op->gtType == TYP_STRUCT || op->OperIsCopyBlkOp())
{
opSize = InferStructOpSizeAlign(op, &alignment);
}
else
{
alignment = genTypeAlignments[op->TypeGet()];
opSize = genTypeSizes[op->TypeGet()];
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
// return size
// alignmentWB is out param
unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
while (op->gtOper == GT_COMMA)
{
op = op->AsOp()->gtOp2;
}
if (op->gtOper == GT_OBJ)
{
CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->GetLayout()->GetClassHandle();
opSize = op->AsObj()->GetLayout()->GetSize();
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else if (op->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(op->AsLclVarCommon());
assert(varDsc->lvType == TYP_STRUCT);
opSize = varDsc->lvSize();
#ifndef TARGET_64BIT
if (varDsc->lvStructDoubleAlign)
{
alignment = TARGET_POINTER_SIZE * 2;
}
else
#endif // !TARGET_64BIT
{
alignment = TARGET_POINTER_SIZE;
}
}
else if (op->gtOper == GT_MKREFANY)
{
opSize = TARGET_POINTER_SIZE * 2;
alignment = TARGET_POINTER_SIZE;
}
else if (op->IsArgPlaceHolderNode())
{
CORINFO_CLASS_HANDLE clsHnd = op->AsArgPlace()->gtArgPlaceClsHnd;
assert(clsHnd != 0);
opSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else
{
assert(!"Unhandled gtOper");
opSize = TARGET_POINTER_SIZE;
alignment = TARGET_POINTER_SIZE;
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
#endif // TARGET_ARMARCH
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX ARM/ARM64 Code Generator Common Code XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures
#include "codegen.h"
#include "lower.h"
#include "gcinfo.h"
#include "emit.h"
#include "patchpointinfo.h"
//------------------------------------------------------------------------
// genStackPointerConstantAdjustment: add a specified constant value to the stack pointer.
// No probe is done.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero.
// regTmp - an available temporary register that is used if 'spDelta' cannot be encoded by
// 'sub sp, sp, #spDelta' instruction.
// Can be REG_NA if the caller knows for certain that 'spDelta' fits into the immediate
// value range.
//
// Return Value:
// None.
//
void CodeGen::genStackPointerConstantAdjustment(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
// We assert that the SP change is less than one page. If it's greater, you should have called a
// function that does a probe, which will in turn call this function.
assert((target_size_t)(-spDelta) <= compiler->eeGetPageSize());
#ifdef TARGET_ARM64
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, regTmp);
#else
genInstrWithConstant(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, -spDelta, INS_FLAGS_DONT_CARE, regTmp);
#endif
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentWithProbe: add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Should only be called as a helper for
// genStackPointerConstantAdjustmentLoopWithProbe.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative or zero. If zero, the probe happens,
// but the stack pointer doesn't move.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// None.
//
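// For example (illustrative, arm64), with spDelta = -4096 and regTmp = x9 this emits roughly:
//   ldr w9, [sp]        // probe the page SP currently points into
//   sub sp, sp, #4096
//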
void CodeGen::genStackPointerConstantAdjustmentWithProbe(ssize_t spDelta, regNumber regTmp)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
genStackPointerConstantAdjustment(spDelta, regTmp);
}
//------------------------------------------------------------------------
// genStackPointerConstantAdjustmentLoopWithProbe: Add a specified constant value to the stack pointer,
// and probe the stack as appropriate. Generates one probe per page, up to the total amount required.
// This will generate a sequence of probes in-line.
//
// Arguments:
// spDelta - the value to add to SP. Must be negative.
// regTmp - temporary register to use as target for probe load instruction
//
// Return Value:
// Offset in bytes from SP to last probed address.
//
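// For example (illustrative, arm64), with a 4K page size and spDelta = -0x2800 (2.5 pages), the loop
// emits roughly:
//   ldr w9, [sp]
//   sub sp, sp, #0x1000     // (this probe/sub pair is emitted twice)
//   ldr w9, [sp]
//   sub sp, sp, #0x800
// and returns 0x800, the offset from the final SP back to the last probed address.
//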
target_ssize_t CodeGen::genStackPointerConstantAdjustmentLoopWithProbe(ssize_t spDelta, regNumber regTmp)
{
assert(spDelta < 0);
const target_size_t pageSize = compiler->eeGetPageSize();
ssize_t spRemainingDelta = spDelta;
do
{
ssize_t spOneDelta = -(ssize_t)min((target_size_t)-spRemainingDelta, pageSize);
genStackPointerConstantAdjustmentWithProbe(spOneDelta, regTmp);
spRemainingDelta -= spOneDelta;
} while (spRemainingDelta < 0);
// What offset from the final SP was the last probe? This depends on the fact that
// genStackPointerConstantAdjustmentWithProbe() probes first, then does "SUB SP".
target_size_t lastTouchDelta = (target_size_t)(-spDelta) % pageSize;
if ((lastTouchDelta == 0) || (lastTouchDelta + STACK_PROBE_BOUNDARY_THRESHOLD_BYTES > pageSize))
{
// We haven't probed almost a complete page. If lastTouchDelta==0, then spDelta was an exact
// multiple of pageSize, which means we last probed exactly one page back. Otherwise, we probed
// the page, but very far from the end. If the next action on the stack might subtract from SP
// first, before touching the current SP, then we do one more probe at the very bottom. This can
// happen on x86, for example, when we copy an argument to the stack using a "SUB ESP; REP MOV"
// strategy.
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, regTmp, REG_SP, 0);
lastTouchDelta = 0;
}
return lastTouchDelta;
}
//------------------------------------------------------------------------
// genCodeForTreeNode Generate code for a single node in the tree.
//
// Preconditions:
// All operands have been evaluated.
//
void CodeGen::genCodeForTreeNode(GenTree* treeNode)
{
regNumber targetReg = treeNode->GetRegNum();
var_types targetType = treeNode->TypeGet();
emitter* emit = GetEmitter();
#ifdef DEBUG
// Validate that all the operands for the current node are consumed in order.
// This is important because LSRA ensures that any necessary copies will be
// handled correctly.
lastConsumedNode = nullptr;
if (compiler->verbose)
{
unsigned seqNum = treeNode->gtSeqNum; // Useful for setting a conditional break in Visual Studio
compiler->gtDispLIRNode(treeNode, "Generating: ");
}
#endif // DEBUG
// Is this a node whose value is already in a register? LSRA denotes this by
// setting the GTF_REUSE_REG_VAL flag.
if (treeNode->IsReuseRegVal())
{
// For now, this is only used for constant nodes.
assert((treeNode->OperGet() == GT_CNS_INT) || (treeNode->OperGet() == GT_CNS_DBL));
JITDUMP(" TreeNode is marked ReuseReg\n");
return;
}
// contained nodes are part of their parents for codegen purposes
// ex : immediates, most LEAs
if (treeNode->isContained())
{
return;
}
switch (treeNode->gtOper)
{
case GT_START_NONGC:
GetEmitter()->emitDisableGC();
break;
case GT_START_PREEMPTGC:
// Kill callee saves GC registers, and create a label
// so that information gets propagated to the emitter.
gcInfo.gcMarkRegSetNpt(RBM_INT_CALLEE_SAVED);
genDefineTempLabel(genCreateTempLabel());
break;
case GT_PROF_HOOK:
// We should be seeing this only if profiler hook is needed
noway_assert(compiler->compIsProfilerHookNeeded());
#ifdef PROFILING_SUPPORTED
// Right now this node is used only for tail calls. In future if
// we intend to use it for Enter or Leave hooks, add a data member
// to this node indicating the kind of profiler hook. For example,
// helper number can be used.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif // PROFILING_SUPPORTED
break;
case GT_LCLHEAP:
genLclHeap(treeNode);
break;
case GT_CNS_INT:
case GT_CNS_DBL:
genSetRegToConst(targetReg, targetType, treeNode);
genProduceReg(treeNode);
break;
case GT_NOT:
case GT_NEG:
genCodeForNegNot(treeNode);
break;
#if defined(TARGET_ARM64)
case GT_BSWAP:
case GT_BSWAP16:
genCodeForBswap(treeNode);
break;
#endif // defined(TARGET_ARM64)
case GT_MOD:
case GT_UMOD:
case GT_DIV:
case GT_UDIV:
genCodeForDivMod(treeNode->AsOp());
break;
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_AND_NOT:
assert(varTypeIsIntegralOrI(treeNode));
FALLTHROUGH;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif // !defined(TARGET_64BIT)
case GT_ADD:
case GT_SUB:
case GT_MUL:
genConsumeOperands(treeNode->AsOp());
genCodeForBinary(treeNode->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
// case GT_ROL: // No ROL instruction on ARM; it has been lowered to ROR.
case GT_ROR:
genCodeForShift(treeNode);
break;
#if !defined(TARGET_64BIT)
case GT_LSH_HI:
case GT_RSH_LO:
genCodeForShiftLong(treeNode);
break;
#endif // !defined(TARGET_64BIT)
case GT_CAST:
genCodeForCast(treeNode->AsOp());
break;
case GT_BITCAST:
genCodeForBitCast(treeNode->AsOp());
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
genCodeForLclAddr(treeNode->AsLclVarCommon());
break;
case GT_LCL_FLD:
genCodeForLclFld(treeNode->AsLclFld());
break;
case GT_LCL_VAR:
genCodeForLclVar(treeNode->AsLclVar());
break;
case GT_STORE_LCL_FLD:
genCodeForStoreLclFld(treeNode->AsLclFld());
break;
case GT_STORE_LCL_VAR:
genCodeForStoreLclVar(treeNode->AsLclVar());
break;
case GT_RETFILT:
case GT_RETURN:
genReturn(treeNode);
break;
case GT_LEA:
// If we are here, it is the case where there is an LEA that cannot be folded into a parent instruction.
genLeaInstruction(treeNode->AsAddrMode());
break;
case GT_INDEX_ADDR:
genCodeForIndexAddr(treeNode->AsIndexAddr());
break;
case GT_IND:
genCodeForIndir(treeNode->AsIndir());
break;
case GT_MUL_LONG:
genCodeForMulLong(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_MADD:
genCodeForMadd(treeNode->AsOp());
break;
case GT_MSUB:
genCodeForMsub(treeNode->AsOp());
break;
case GT_INC_SATURATE:
genCodeForIncSaturate(treeNode);
break;
case GT_MULHI:
genCodeForMulHi(treeNode->AsOp());
break;
case GT_SWAP:
genCodeForSwap(treeNode->AsOp());
break;
case GT_ADDEX:
genCodeForAddEx(treeNode->AsOp());
break;
case GT_BFIZ:
genCodeForBfiz(treeNode->AsOp());
break;
case GT_CSNEG_MI:
genCodeForCond(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JMP:
genJmpMethod(treeNode);
break;
case GT_CKFINITE:
genCkfinite(treeNode);
break;
case GT_INTRINSIC:
genIntrinsic(treeNode);
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
genSIMDIntrinsic(treeNode->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
genHWIntrinsic(treeNode->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_CMP:
#ifdef TARGET_ARM64
case GT_TEST_EQ:
case GT_TEST_NE:
#endif // TARGET_ARM64
genCodeForCompare(treeNode->AsOp());
break;
case GT_JTRUE:
genCodeForJumpTrue(treeNode->AsOp());
break;
#ifdef TARGET_ARM64
case GT_JCMP:
genCodeForJumpCompare(treeNode->AsOp());
break;
#endif // TARGET_ARM64
case GT_JCC:
genCodeForJcc(treeNode->AsCC());
break;
case GT_SETCC:
genCodeForSetcc(treeNode->AsCC());
break;
case GT_RETURNTRAP:
genCodeForReturnTrap(treeNode->AsOp());
break;
case GT_STOREIND:
genCodeForStoreInd(treeNode->AsStoreInd());
break;
case GT_COPY:
// This is handled at the time we call genConsumeReg() on the GT_COPY
break;
case GT_FIELD_LIST:
// Should always be marked contained.
assert(!"LIST, FIELD_LIST nodes should always be marked contained.");
break;
case GT_PUTARG_STK:
genPutArgStk(treeNode->AsPutArgStk());
break;
case GT_PUTARG_REG:
genPutArgReg(treeNode->AsOp());
break;
case GT_PUTARG_SPLIT:
genPutArgSplit(treeNode->AsPutArgSplit());
break;
case GT_CALL:
genCall(treeNode->AsCall());
break;
case GT_MEMORYBARRIER:
{
CodeGen::BarrierKind barrierKind =
treeNode->gtFlags & GTF_MEMORYBARRIER_LOAD ? BARRIER_LOAD_ONLY : BARRIER_FULL;
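            // On arm64 this typically lowers to "dmb ishld" for a load-only barrier and "dmb ish"
            // for a full barrier; arm32 emits a full barrier in either case.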
instGen_MemoryBarrier(barrierKind);
break;
}
#ifdef TARGET_ARM64
case GT_XCHG:
case GT_XORR:
case GT_XAND:
case GT_XADD:
genLockedInstructions(treeNode->AsOp());
break;
case GT_CMPXCHG:
genCodeForCmpXchg(treeNode->AsCmpXchg());
break;
#endif // TARGET_ARM64
case GT_RELOAD:
// do nothing - reload is just a marker.
// The parent node will call genConsumeReg on this which will trigger the unspill of this node's child
// into the register specified in this node.
break;
case GT_NOP:
break;
case GT_KEEPALIVE:
if (treeNode->AsOp()->gtOp1->isContained())
{
// For this case we simply need to update the lifetime of the local.
genUpdateLife(treeNode->AsOp()->gtOp1);
}
else
{
genConsumeReg(treeNode->AsOp()->gtOp1);
}
break;
case GT_NO_OP:
instGen(INS_nop);
break;
case GT_BOUNDS_CHECK:
genRangeCheck(treeNode);
break;
case GT_PHYSREG:
genCodeForPhysReg(treeNode->AsPhysReg());
break;
case GT_NULLCHECK:
genCodeForNullCheck(treeNode->AsIndir());
break;
case GT_CATCH_ARG:
noway_assert(handlerGetsXcptnObj(compiler->compCurBB->bbCatchTyp));
/* Catch arguments get passed in a register. genCodeForBBlist()
would have marked it as holding a GC object, but not used. */
noway_assert(gcInfo.gcRegGCrefSetCur & RBM_EXCEPTION_OBJECT);
genConsumeReg(treeNode);
break;
case GT_PINVOKE_PROLOG:
noway_assert(((gcInfo.gcRegGCrefSetCur | gcInfo.gcRegByrefSetCur) & ~fullIntArgRegMask()) == 0);
#ifdef PSEUDORANDOM_NOP_INSERTION
// the runtime side requires the codegen here to be consistent
emit->emitDisableRandomNops();
#endif // PSEUDORANDOM_NOP_INSERTION
break;
case GT_LABEL:
genPendingCallLabel = genCreateTempLabel();
#if defined(TARGET_ARM)
genMov32RelocatableDisplacement(genPendingCallLabel, targetReg);
#else
emit->emitIns_R_L(INS_adr, EA_PTRSIZE, genPendingCallLabel, targetReg);
#endif
break;
case GT_STORE_OBJ:
case GT_STORE_DYN_BLK:
case GT_STORE_BLK:
genCodeForStoreBlk(treeNode->AsBlk());
break;
case GT_JMPTABLE:
genJumpTable(treeNode);
break;
case GT_SWITCH_TABLE:
genTableBasedSwitch(treeNode);
break;
case GT_ARR_INDEX:
genCodeForArrIndex(treeNode->AsArrIndex());
break;
case GT_ARR_OFFSET:
genCodeForArrOffset(treeNode->AsArrOffs());
break;
#ifdef TARGET_ARM
case GT_CLS_VAR_ADDR:
emit->emitIns_R_C(INS_lea, EA_PTRSIZE, targetReg, treeNode->AsClsVar()->gtClsVarHnd, 0);
genProduceReg(treeNode);
break;
case GT_LONG:
assert(treeNode->isUsedFromReg());
genConsumeRegs(treeNode);
break;
#endif // TARGET_ARM
case GT_IL_OFFSET:
// Do nothing; these nodes are simply markers for debug info.
break;
default:
{
unreached();
}
break;
}
}
//---------------------------------------------------------------------
// genSetGSSecurityCookie: Set the "GS" security cookie in the prolog.
//
// Arguments:
// initReg - register to use as a scratch register
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'false' if and only if
// this call sets 'initReg' to a non-zero value.
//
// Return Value:
// None
//
void CodeGen::genSetGSSecurityCookie(regNumber initReg, bool* pInitRegZeroed)
{
assert(compiler->compGeneratingProlog);
if (!compiler->getNeedsGSSecurityCookie())
{
return;
}
if (compiler->opts.IsOSR() && compiler->info.compPatchpointInfo->HasSecurityCookie())
{
// Security cookie is on original frame and was initialized there.
return;
}
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
noway_assert(compiler->gsGlobalSecurityCookieVal != 0);
// initReg = #GlobalSecurityCookieVal; [frame.GSSecurityCookie] = initReg
instGen_Set_Reg_To_Imm(EA_PTRSIZE, initReg, compiler->gsGlobalSecurityCookieVal);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
else
{
instGen_Set_Reg_To_Imm(EA_PTR_DSP_RELOC, initReg, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_SetGSCookie) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, initReg, initReg, 0);
regSet.verifyRegUsed(initReg);
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, initReg, compiler->lvaGSSecurityCookie, 0);
}
*pInitRegZeroed = false;
}
//------------------------------------------------------------------------
// genEmitGSCookieCheck: Generate code to check that the GS cookie
// wasn't thrashed by a buffer overrun.
//
void CodeGen::genEmitGSCookieCheck(bool pushReg)
{
noway_assert(compiler->gsGlobalSecurityCookieAddr || compiler->gsGlobalSecurityCookieVal);
// Make sure that the return register is reported as live GC-ref so that any GC that kicks in while
// executing GS cookie check will not collect the object pointed to by REG_INTRET (R0).
if (!pushReg && (compiler->info.compRetNativeType == TYP_REF))
gcInfo.gcRegGCrefSetCur |= RBM_INTRET;
// We need two temporary registers, to load the GS cookie values and compare them. We can't use
// any argument registers if 'pushReg' is true (meaning we have a JMP call). They should be
// callee-trash registers, which should not contain anything interesting at this point.
// We don't have any IR node representing this check, so LSRA can't communicate registers
// for us to use.
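    //
    // The emitted sequence is roughly (illustrative):
    //   <load the cookie constant, or load it through gsGlobalSecurityCookieAddr>  -> regGSConst
    //   ldr regGSValue, [<frame GS cookie slot>]
    //   cmp regGSConst, regGSValue
    //   beq <ok>
    //   bl  CORINFO_HELP_FAIL_FAST
    // <ok>: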
regNumber regGSConst = REG_GSCOOKIE_TMP_0;
regNumber regGSValue = REG_GSCOOKIE_TMP_1;
if (compiler->gsGlobalSecurityCookieAddr == nullptr)
{
// load the GS cookie constant into a reg
//
instGen_Set_Reg_To_Imm(EA_PTRSIZE, regGSConst, compiler->gsGlobalSecurityCookieVal);
}
else
{
// Ngen case - GS cookie constant needs to be accessed through an indirection.
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, regGSConst, (ssize_t)compiler->gsGlobalSecurityCookieAddr,
INS_FLAGS_DONT_CARE DEBUGARG((size_t)THT_GSCookieCheck) DEBUGARG(GTF_EMPTY));
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, regGSConst, regGSConst, 0);
}
// Load this method's GS value from the stack frame
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, regGSValue, compiler->lvaGSSecurityCookie, 0);
// Compare with the GC cookie constant
GetEmitter()->emitIns_R_R(INS_cmp, EA_PTRSIZE, regGSConst, regGSValue);
BasicBlock* gsCheckBlk = genCreateTempLabel();
inst_JMP(EJ_eq, gsCheckBlk);
// regGSConst and regGSValue aren't needed anymore, we can use them for helper call
genEmitHelperCall(CORINFO_HELP_FAIL_FAST, 0, EA_UNKNOWN, regGSConst);
genDefineTempLabel(gsCheckBlk);
}
//---------------------------------------------------------------------
// genIntrinsic - generate code for a given intrinsic
//
// Arguments
// treeNode - the GT_INTRINSIC node
//
// Return value:
// None
//
void CodeGen::genIntrinsic(GenTree* treeNode)
{
assert(treeNode->OperIs(GT_INTRINSIC));
// Both operand and its result must be of the same floating point type.
GenTree* srcNode = treeNode->AsOp()->gtOp1;
assert(varTypeIsFloating(srcNode));
assert(srcNode->TypeGet() == treeNode->TypeGet());
// Only a subset of functions are treated as math intrinsics.
//
switch (treeNode->AsIntrinsic()->gtIntrinsicName)
{
case NI_System_Math_Abs:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_ABS, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
#ifdef TARGET_ARM64
case NI_System_Math_Ceiling:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintp, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Floor:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintm, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Truncate:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintz, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Round:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_frintn, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
case NI_System_Math_Max:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmax, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
case NI_System_Math_Min:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitIns_R_R_R(INS_fmin, emitActualTypeSize(treeNode), treeNode->GetRegNum(),
treeNode->gtGetOp1()->GetRegNum(), treeNode->gtGetOp2()->GetRegNum());
break;
#endif // TARGET_ARM64
case NI_System_Math_Sqrt:
genConsumeOperands(treeNode->AsOp());
GetEmitter()->emitInsBinary(INS_SQRT, emitActualTypeSize(treeNode), treeNode, srcNode);
break;
default:
assert(!"genIntrinsic: Unsupported intrinsic");
unreached();
}
genProduceReg(treeNode);
}
//---------------------------------------------------------------------
// genPutArgStk - generate code for a GT_PUTARG_STK node
//
// Arguments
// treeNode - the GT_PUTARG_STK node
//
// Return value:
// None
//
void CodeGen::genPutArgStk(GenTreePutArgStk* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_STK));
GenTree* source = treeNode->gtOp1;
var_types targetType;
if (!compMacOsArm64Abi())
{
targetType = genActualType(source->TypeGet());
}
else
{
targetType = source->TypeGet();
}
emitter* emit = GetEmitter();
// This is the varNum for our store operations,
// typically this is the varNum for the Outgoing arg space
// When we are generating a tail call it will be the varNum for arg0
unsigned varNumOut = (unsigned)-1;
unsigned argOffsetMax = (unsigned)-1; // Records the maximum size of this area for assert checks
// Get argument offset to use with 'varNumOut'
// Here we cross check that argument offset hasn't changed from lowering to codegen since
// we are storing arg slot number in GT_PUTARG_STK node in lowering phase.
unsigned argOffsetOut = treeNode->getArgOffset();
#ifdef DEBUG
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(treeNode->gtCall, treeNode);
assert(curArgTabEntry != nullptr);
DEBUG_ARG_SLOTS_ASSERT(argOffsetOut == (curArgTabEntry->slotNum * TARGET_POINTER_SIZE));
#endif // DEBUG
// Whether to setup stk arg in incoming or out-going arg area?
// Fast tail calls implemented as epilog+jmp = stk arg is setup in incoming arg area.
// All other calls - stk arg is setup in out-going arg area.
if (treeNode->putInIncomingArgArea())
{
varNumOut = getFirstArgWithStackSlot();
argOffsetMax = compiler->compArgSize;
#if FEATURE_FASTTAILCALL
// This must be a fast tail call.
assert(treeNode->gtCall->IsFastTailCall());
// Since it is a fast tail call, the existence of first incoming arg is guaranteed
// because fast tail call requires that in-coming arg area of caller is >= out-going
// arg area required for tail call.
LclVarDsc* varDsc = compiler->lvaGetDesc(varNumOut);
assert(varDsc != nullptr);
#endif // FEATURE_FASTTAILCALL
}
else
{
varNumOut = compiler->lvaOutgoingArgSpaceVar;
argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
}
bool isStruct = (targetType == TYP_STRUCT) || (source->OperGet() == GT_FIELD_LIST);
if (!isStruct) // a normal non-Struct argument
{
if (varTypeIsSIMD(targetType))
{
assert(!source->isContained());
regNumber srcReg = genConsumeReg(source);
assert((srcReg != REG_NA) && (genIsValidFloatReg(srcReg)));
assert(compMacOsArm64Abi() || treeNode->GetStackByteSize() % TARGET_POINTER_SIZE == 0);
#ifdef TARGET_ARM64
if (compMacOsArm64Abi() && (treeNode->GetStackByteSize() == 12))
{
regNumber tmpReg = treeNode->GetSingleTempReg();
GetEmitter()->emitStoreSIMD12ToLclOffset(varNumOut, argOffsetOut, srcReg, tmpReg);
argOffsetOut += 12;
}
else
#endif // TARGET_ARM64
{
emitAttr storeAttr = emitTypeSize(targetType);
emit->emitIns_S_R(INS_str, storeAttr, srcReg, varNumOut, argOffsetOut);
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
}
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
return;
}
if (compMacOsArm64Abi())
{
switch (treeNode->GetStackByteSize())
{
case 1:
targetType = TYP_BYTE;
break;
case 2:
targetType = TYP_SHORT;
break;
default:
assert(treeNode->GetStackByteSize() >= 4);
break;
}
}
instruction storeIns = ins_Store(targetType);
emitAttr storeAttr = emitTypeSize(targetType);
// If it is contained then source must be the integer constant zero
if (source->isContained())
{
#ifdef TARGET_ARM64
assert(source->OperGet() == GT_CNS_INT);
assert(source->AsIntConCommon()->IconValue() == 0);
emit->emitIns_S_R(storeIns, storeAttr, REG_ZR, varNumOut, argOffsetOut);
#else // !TARGET_ARM64
// There is no zero register on ARM32
unreached();
#endif // !TARGET_ARM64
}
else
{
genConsumeReg(source);
emit->emitIns_S_R(storeIns, storeAttr, source->GetRegNum(), varNumOut, argOffsetOut);
#ifdef TARGET_ARM
if (targetType == TYP_LONG)
{
// This case currently only occurs for double types that are passed as TYP_LONG;
// actual long types would have been decomposed by now.
assert(source->IsCopyOrReload());
regNumber otherReg = (regNumber)source->AsCopyOrReload()->GetRegNumByIdx(1);
assert(otherReg != REG_NA);
argOffsetOut += EA_4BYTE;
emit->emitIns_S_R(storeIns, storeAttr, otherReg, varNumOut, argOffsetOut);
}
#endif // TARGET_ARM
}
argOffsetOut += EA_SIZE_IN_BYTES(storeAttr);
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
}
else // We have some kind of a struct argument
{
assert(source->isContained()); // We expect that this node was marked as contained in Lower
if (source->OperGet() == GT_FIELD_LIST)
{
genPutArgStkFieldList(treeNode, varNumOut);
}
else // We must have a GT_OBJ or a GT_LCL_VAR
{
noway_assert(source->OperIs(GT_LCL_VAR, GT_OBJ));
var_types targetType = source->TypeGet();
noway_assert(varTypeIsStruct(targetType));
// We will copy this struct to the stack, possibly using a ldp/ldr instruction
// in ARM64/ARM
// Setup loReg (and hiReg) from the internal registers that we reserved in lower.
//
regNumber loReg = treeNode->ExtractTempReg();
#ifdef TARGET_ARM64
regNumber hiReg = treeNode->GetSingleTempReg();
#endif // TARGET_ARM64
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
if (source->OperGet() == GT_LCL_VAR)
{
varNode = source->AsLclVarCommon();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
// We will treat this case the same as above
                    // (i.e. if we just had this GT_LCL_VAR directly as the source)
                    // so update 'source' to point to this GT_LCL_VAR_ADDR node
// and continue to the codegen for the LCL_VAR node below
//
assert(addrNode->isContained());
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
else // addrNode is used
{
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
// but we use `genConsumeAddress` as a precaution, use `genConsumeReg()` instead.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
#ifdef TARGET_ARM64
// If addrReg equal to loReg, swap(loReg, hiReg)
// This reduces code complexity by only supporting one addrReg overwrite case
if (loReg == addrReg)
{
loReg = hiReg;
hiReg = addrReg;
}
#endif // TARGET_ARM64
}
}
            // Either varNode or addrNode must have been set up above;
            // the xor ensures that only one of the two is set up, not both.
assert((varNode != nullptr) ^ (addrNode != nullptr));
ClassLayout* layout;
unsigned srcSize;
bool isHfa;
            // Set up srcSize, isHfa, and the struct layout
if (source->OperGet() == GT_LCL_VAR)
{
assert(varNode != nullptr);
LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
srcSize = varDsc->lvSize();
isHfa = varDsc->lvIsHfa();
layout = varDsc->GetLayout();
}
else // we must have a GT_OBJ
{
assert(source->OperGet() == GT_OBJ);
// If the source is an OBJ node then we need to use the type information
// it provides (size and GC layout) even if the node wraps a lclvar. Due
// to struct reinterpretation (e.g. Unsafe.As<X, Y>) it is possible that
// the OBJ node has a different type than the lclvar.
layout = source->AsObj()->GetLayout();
srcSize = layout->GetSize();
isHfa = compiler->IsHfa(layout->GetClassHandle());
}
// If we have an HFA we can't have any GC pointers,
            // if not, then the max size for the struct is 16 bytes
if (isHfa)
{
noway_assert(!layout->HasGCPtr());
}
#ifdef TARGET_ARM64
else
{
noway_assert(srcSize <= 2 * TARGET_POINTER_SIZE);
}
noway_assert(srcSize <= MAX_PASS_MULTIREG_BYTES);
#endif // TARGET_ARM64
unsigned structSize;
unsigned dstSize = treeNode->GetStackByteSize();
if (dstSize != srcSize)
{
                // We can generate smaller code if the store size is a multiple of TARGET_POINTER_SIZE.
// The dst size can be rounded up to PUTARG_STK size.
// The src size can be rounded up if it reads a local variable slot because the local
// variable stack allocation size is rounded up to be a multiple of the TARGET_POINTER_SIZE.
// The exception is arm64 apple arguments because they can be passed without padding.
if (varNode != nullptr)
{
// If we have a varNode, even if it was casted using `OBJ`, we can read its original memory size.
const LclVarDsc* varDsc = compiler->lvaGetDesc(varNode);
const unsigned varStackSize = varDsc->lvSize();
if (varStackSize >= srcSize)
{
srcSize = varStackSize;
}
}
}
if (dstSize == srcSize)
{
structSize = dstSize;
}
else
{
// With Unsafe object cast we can have different strange combinations:
// PutArgStk<8>(Obj<16>(LclVar<8>)) -> copy 8 bytes;
// PutArgStk<16>(Obj<16>(LclVar<8>)) -> copy 16 bytes, reading undefined memory after the local.
structSize = min(dstSize, srcSize);
}
int remainingSize = structSize;
unsigned structOffset = 0;
unsigned nextIndex = 0;
#ifdef TARGET_ARM64
// For a >= 16-byte structSize we will generate a ldp and stp instruction each loop
// ldp x2, x3, [x0]
// stp x2, x3, [sp, #16]
while (remainingSize >= 2 * TARGET_POINTER_SIZE)
{
var_types type0 = layout->GetGCPtrType(nextIndex + 0);
var_types type1 = layout->GetGCPtrType(nextIndex + 1);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_R_S_S(INS_ldp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg,
varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg);
noway_assert((remainingSize == 2 * TARGET_POINTER_SIZE) || (hiReg != addrReg));
// Load from our address expression source
emit->emitIns_R_R_R_I(INS_ldp, emitTypeSize(type0), loReg, hiReg, addrReg, structOffset,
INS_OPTS_NONE, emitTypeSize(type0));
}
// Emit stp instruction to store the two registers into the outgoing argument area
emit->emitIns_S_S_R_R(INS_stp, emitTypeSize(type0), emitTypeSize(type1), loReg, hiReg, varNumOut,
argOffsetOut);
argOffsetOut += (2 * TARGET_POINTER_SIZE); // We stored 16-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= (2 * TARGET_POINTER_SIZE); // We loaded 16-bytes of the struct
structOffset += (2 * TARGET_POINTER_SIZE);
nextIndex += 2;
}
#else // TARGET_ARM
// For a >= 4 byte structSize we will generate a ldr and str instruction each loop
// ldr r2, [r0]
// str r2, [sp, #16]
while (remainingSize >= TARGET_POINTER_SIZE)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), loReg, varNode->GetLclNum(), structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(loReg != addrReg || remainingSize == TARGET_POINTER_SIZE);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), loReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), loReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored 4-bytes of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded 4-bytes of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
#endif // TARGET_ARM
// For a 12-byte structSize we will generate two load instructions
// ldr x2, [x0]
// ldr w3, [x0, #8]
// str x2, [sp, #16]
// str w3, [sp, #24]
while (remainingSize > 0)
{
var_types type;
if (remainingSize >= TARGET_POINTER_SIZE)
{
type = layout->GetGCPtrType(nextIndex);
}
else // (remainingSize < TARGET_POINTER_SIZE)
{
// the left over size is smaller than a pointer and thus can never be a GC type
assert(!layout->IsGCPtr(nextIndex));
if (remainingSize == 1)
{
type = TYP_UBYTE;
}
else if (remainingSize == 2)
{
type = TYP_USHORT;
}
else
{
assert(remainingSize == 4);
type = TYP_UINT;
}
}
const emitAttr attr = emitTypeSize(type);
const unsigned moveSize = genTypeSize(type);
assert(EA_SIZE_IN_BYTES(attr) == moveSize);
remainingSize -= moveSize;
instruction loadIns = ins_Load(type);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(loadIns, attr, loReg, varNode->GetLclNum(), structOffset);
}
else
{
assert(loReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(loadIns, attr, loReg, addrReg, structOffset);
}
// Emit a store instruction to store the register into the outgoing argument area
instruction storeIns = ins_Store(type);
emit->emitIns_S_R(storeIns, attr, loReg, varNumOut, argOffsetOut);
argOffsetOut += moveSize;
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
structOffset += moveSize;
nextIndex++;
}
}
}
}
//---------------------------------------------------------------------
// genPutArgReg - generate code for a GT_PUTARG_REG node
//
// Arguments
// tree - the GT_PUTARG_REG node
//
// Return value:
// None
//
void CodeGen::genPutArgReg(GenTreeOp* tree)
{
assert(tree->OperIs(GT_PUTARG_REG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
assert(targetType != TYP_STRUCT);
GenTree* op1 = tree->gtOp1;
genConsumeReg(op1);
// If child node is not already in the register we need, move it
inst_Mov(targetType, targetReg, op1->GetRegNum(), /* canSkip */ true);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genPutArgSplit - generate code for a GT_PUTARG_SPLIT node
//
// Arguments
// tree - the GT_PUTARG_SPLIT node
//
// Return value:
// None
//
void CodeGen::genPutArgSplit(GenTreePutArgSplit* treeNode)
{
assert(treeNode->OperIs(GT_PUTARG_SPLIT));
GenTree* source = treeNode->gtOp1;
emitter* emit = GetEmitter();
unsigned varNumOut = compiler->lvaOutgoingArgSpaceVar;
unsigned argOffsetMax = compiler->lvaOutgoingArgSpaceSize;
if (source->OperGet() == GT_FIELD_LIST)
{
// Evaluate each of the GT_FIELD_LIST items into their register
// and store their register into the outgoing argument area
unsigned regIndex = 0;
unsigned firstOnStackOffs = UINT_MAX;
for (GenTreeFieldList::Use& use : source->AsFieldList()->Uses())
{
GenTree* nextArgNode = use.GetNode();
regNumber fieldReg = nextArgNode->GetRegNum();
genConsumeReg(nextArgNode);
if (regIndex >= treeNode->gtNumRegs)
{
if (firstOnStackOffs == UINT_MAX)
{
firstOnStackOffs = use.GetOffset();
}
var_types type = nextArgNode->TypeGet();
emitAttr attr = emitTypeSize(type);
unsigned offset = treeNode->getArgOffset() + use.GetOffset() - firstOnStackOffs;
// We can't write beyond the outgoing arg area
assert(offset + EA_SIZE_IN_BYTES(attr) <= argOffsetMax);
// Emit store instructions to store the registers produced by the GT_FIELD_LIST into the outgoing
// argument area
emit->emitIns_S_R(ins_Store(type), attr, fieldReg, varNumOut, offset);
}
else
{
var_types type = treeNode->GetRegType(regIndex);
regNumber argReg = treeNode->GetRegNumByIdx(regIndex);
#ifdef TARGET_ARM
if (type == TYP_LONG)
{
// We should only see long fields for DOUBLEs passed in 2 integer registers, via bitcast.
// All other LONGs should have been decomposed.
// Handle the first INT, and then handle the 2nd below.
assert(nextArgNode->OperIs(GT_BITCAST));
type = TYP_INT;
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
// Now set up the next register for the 2nd INT
argReg = REG_NEXT(argReg);
regIndex++;
assert(argReg == treeNode->GetRegNumByIdx(regIndex));
fieldReg = nextArgNode->AsMultiRegOp()->GetRegNumByIdx(1);
}
#endif // TARGET_ARM
// If child node is not already in the register we need, move it
inst_Mov(type, argReg, fieldReg, /* canSkip */ true);
regIndex++;
}
}
}
else
{
var_types targetType = source->TypeGet();
assert(source->OperGet() == GT_OBJ);
assert(varTypeIsStruct(targetType));
regNumber baseReg = treeNode->ExtractTempReg();
regNumber addrReg = REG_NA;
GenTreeLclVarCommon* varNode = nullptr;
GenTree* addrNode = nullptr;
addrNode = source->AsOp()->gtOp1;
// addrNode can either be a GT_LCL_VAR_ADDR or an address expression
//
if (addrNode->OperGet() == GT_LCL_VAR_ADDR)
{
// We have a GT_OBJ(GT_LCL_VAR_ADDR)
//
// We will treat this case the same as above
// (i.e if we just had this GT_LCL_VAR directly as the source)
// so update 'source' to point this GT_LCL_VAR_ADDR node
// and continue to the codegen for the LCL_VAR node below
//
varNode = addrNode->AsLclVarCommon();
addrNode = nullptr;
}
// Either varNode or addrNode must have been set up above;
// the XOR ensures that exactly one of the two is set, not both
assert((varNode != nullptr) ^ (addrNode != nullptr));
// This is the varNum for our load operations,
// only used when we have a struct with a LclVar source
unsigned srcVarNum = BAD_VAR_NUM;
if (varNode != nullptr)
{
assert(varNode->isContained());
srcVarNum = varNode->GetLclNum();
// handle promote situation
LclVarDsc* varDsc = compiler->lvaGetDesc(srcVarNum);
// This struct also must live in the stack frame
// And it can't live in a register (SIMD)
assert(varDsc->lvType == TYP_STRUCT);
assert(varDsc->lvOnFrame && !varDsc->lvRegister);
// We don't split HFA structs
assert(!varDsc->lvIsHfa());
}
else // addrNode is used
{
assert(addrNode != nullptr);
// TODO-Cleanup: `Lowering::NewPutArg` marks only `LCL_VAR_ADDR` as contained nowadays,
// but we keep using `genConsumeAddress` as a precaution; consider switching to `genConsumeReg()`.
assert(!addrNode->isContained());
// Generate code to load the address that we need into a register
genConsumeAddress(addrNode);
addrReg = addrNode->GetRegNum();
// If addrReg is equal to baseReg, we use the last target register as an alternative baseReg.
// Because the candidate mask for the internal baseReg does not include any of the target registers,
// we can ensure that baseReg, addrReg, and the last target register are not all the same.
assert(baseReg != addrReg);
// We don't split HFA structs
assert(!compiler->IsHfa(source->AsObj()->GetLayout()->GetClassHandle()));
}
ClassLayout* layout = source->AsObj()->GetLayout();
// Put on stack first
unsigned nextIndex = treeNode->gtNumRegs;
unsigned structOffset = nextIndex * TARGET_POINTER_SIZE;
int remainingSize = treeNode->GetStackByteSize();
unsigned argOffsetOut = treeNode->getArgOffset();
// remainingSize is always multiple of TARGET_POINTER_SIZE
assert(remainingSize % TARGET_POINTER_SIZE == 0);
while (remainingSize > 0)
{
var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), baseReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
assert(baseReg != addrReg);
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), baseReg, addrReg, structOffset);
}
// Emit str instruction to store the register into the outgoing argument area
emit->emitIns_S_R(INS_str, emitTypeSize(type), baseReg, varNumOut, argOffsetOut);
argOffsetOut += TARGET_POINTER_SIZE; // We stored a pointer-sized slot of the struct
assert(argOffsetOut <= argOffsetMax); // We can't write beyond the outgoing arg area
remainingSize -= TARGET_POINTER_SIZE; // We loaded a pointer-sized slot of the struct
structOffset += TARGET_POINTER_SIZE;
nextIndex += 1;
}
// We set up the registers in order, so that by the time we assign the last target register, `baseReg` is no
// longer in use, in case we had to reuse the last target register for it.
structOffset = 0;
for (unsigned idx = 0; idx < treeNode->gtNumRegs; idx++)
{
regNumber targetReg = treeNode->GetRegNumByIdx(idx);
var_types type = treeNode->GetRegType(idx);
if (varNode != nullptr)
{
// Load from our varNumImp source
emit->emitIns_R_S(INS_ldr, emitTypeSize(type), targetReg, srcVarNum, structOffset);
}
else
{
// check for case of destroying the addrRegister while we still need it
if (targetReg == addrReg && idx != treeNode->gtNumRegs - 1)
{
assert(targetReg != baseReg);
var_types addrType = addrNode->TypeGet();
emit->emitIns_Mov(INS_mov, emitActualTypeSize(addrType), baseReg, addrReg, /* canSkip */ false);
addrReg = baseReg;
}
// Load from our address expression source
emit->emitIns_R_R_I(INS_ldr, emitTypeSize(type), targetReg, addrReg, structOffset);
}
structOffset += TARGET_POINTER_SIZE;
}
}
genProduceReg(treeNode);
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------
// genMultiRegStoreToSIMDLocal: store multi-reg value to a single-reg SIMD local
//
// Arguments:
// lclNode - GentreeLclVar of GT_STORE_LCL_VAR
//
// Return Value:
// None
//
void CodeGen::genMultiRegStoreToSIMDLocal(GenTreeLclVar* lclNode)
{
regNumber dst = lclNode->GetRegNum();
GenTree* op1 = lclNode->gtGetOp1();
GenTree* actualOp1 = op1->gtSkipReloadOrCopy();
unsigned regCount = actualOp1->GetMultiRegCount(compiler);
assert(op1->IsMultiRegNode());
genConsumeRegs(op1);
// Treat dst register as a homogeneous vector with element size equal to the src size
// Insert pieces in reverse order
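// For example, if the source is a two-register value whose pieces are 4-byte floats in reg0 and reg1, the loop
// below emits 'mov dst.s[1], reg1.s[0]' followed by 'mov dst.s[0], reg0.s[0]'.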
for (int i = regCount - 1; i >= 0; --i)
{
var_types type = op1->gtSkipReloadOrCopy()->GetRegTypeByIndex(i);
regNumber reg = actualOp1->GetRegByIndex(i);
if (op1->IsCopyOrReload())
{
// GT_COPY/GT_RELOAD will have valid reg for those positions
// that need to be copied or reloaded.
regNumber reloadReg = op1->AsCopyOrReload()->GetRegNumByIdx(i);
if (reloadReg != REG_NA)
{
reg = reloadReg;
}
}
assert(reg != REG_NA);
if (varTypeIsFloating(type))
{
// If the register piece was passed in a floating point register
// Use a vector mov element instruction
// src is not a vector, so it is in the first element reg[0]
// mov dst[i], reg[0]
// This effectively moves from `reg[0]` to `dst[i]`, leaving the other dst bits unchanged until later
// iterations.
// For the case where reg == dst, if we iterate so that we write dst[0] last, we eliminate the need for
// a temporary
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), dst, reg, i, 0);
}
else
{
// If the register piece was passed in an integer register
// Use a vector mov from general purpose register instruction
// mov dst[i], reg
// This effectively moves from `reg` to `dst[i]`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), dst, reg, i);
}
}
genProduceReg(lclNode);
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genRangeCheck: generate code for GT_BOUNDS_CHECK node.
//
void CodeGen::genRangeCheck(GenTree* oper)
{
noway_assert(oper->OperIs(GT_BOUNDS_CHECK));
GenTreeBoundsChk* bndsChk = oper->AsBoundsChk();
GenTree* arrLen = bndsChk->GetArrayLength();
GenTree* arrIndex = bndsChk->GetIndex();
GenTree* arrRef = nullptr;
int lenOffset = 0;
GenTree* src1;
GenTree* src2;
emitJumpKind jmpKind;
genConsumeRegs(arrIndex);
genConsumeRegs(arrLen);
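// The check throws when the (unsigned) index is >= the array length. When the index is a contained immediate we
// emit 'cmp length, #index' and branch to the throw block on 'ls' (length <= index); otherwise we emit
// 'cmp index, length' and branch on 'hs' (index >= length).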
if (arrIndex->isContainedIntOrIImmed())
{
// To encode using a cmp immediate, we place the
// constant operand in the second position
src1 = arrLen;
src2 = arrIndex;
jmpKind = EJ_ls;
}
else
{
src1 = arrIndex;
src2 = arrLen;
jmpKind = EJ_hs;
}
var_types bndsChkType = genActualType(src2->TypeGet());
#ifdef DEBUG
// Bounds checks can only be 32 or 64 bit sized comparisons.
assert(bndsChkType == TYP_INT || bndsChkType == TYP_LONG);
// The type of the bounds check should always be wide enough to compare against the index.
assert(emitTypeSize(bndsChkType) >= emitActualTypeSize(src1->TypeGet()));
#endif // DEBUG
GetEmitter()->emitInsBinary(INS_cmp, emitActualTypeSize(bndsChkType), src1, src2);
genJumpToThrowHlpBlk(jmpKind, bndsChk->gtThrowKind, bndsChk->gtIndRngFailBB);
}
//---------------------------------------------------------------------
// genCodeForPhysReg - generate code for a GT_PHYSREG node
//
// Arguments
// tree - the GT_PHYSREG node
//
// Return value:
// None
//
void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
{
assert(tree->OperIs(GT_PHYSREG));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
genTransferRegGCState(targetReg, tree->gtSrcReg);
genProduceReg(tree);
}
//---------------------------------------------------------------------
// genCodeForNullCheck - generate code for a GT_NULLCHECK node
//
// Arguments
// tree - the GT_NULLCHECK node
//
// Return value:
// None
//
void CodeGen::genCodeForNullCheck(GenTreeIndir* tree)
{
#ifdef TARGET_ARM
assert(!"GT_NULLCHECK isn't supported for Arm32; use GT_IND.");
#else
assert(tree->OperIs(GT_NULLCHECK));
GenTree* op1 = tree->gtOp1;
genConsumeRegs(op1);
regNumber targetReg = REG_ZR;
GetEmitter()->emitInsLoadStoreOp(ins_Load(tree->TypeGet()), emitActualTypeSize(tree), targetReg, tree);
#endif
}
//------------------------------------------------------------------------
// genCodeForArrIndex: Generates code to bounds check the index for one dimension of an array reference,
// producing the effective index by subtracting the lower bound.
//
// Arguments:
// arrIndex - the node for which we're generating code
//
// Return Value:
// None.
//
void CodeGen::genCodeForArrIndex(GenTreeArrIndex* arrIndex)
{
emitter* emit = GetEmitter();
GenTree* arrObj = arrIndex->ArrObj();
GenTree* indexNode = arrIndex->IndexExpr();
regNumber arrReg = genConsumeReg(arrObj);
regNumber indexReg = genConsumeReg(indexNode);
regNumber tgtReg = arrIndex->GetRegNum();
noway_assert(tgtReg != REG_NA);
// We will use a temp register to load the lower bound and dimension size values.
regNumber tmpReg = arrIndex->GetSingleTempReg();
assert(tgtReg != tmpReg);
unsigned dim = arrIndex->gtCurrDim;
unsigned rank = arrIndex->gtArrRank;
var_types elemType = arrIndex->gtArrElemType;
unsigned offset;
offset = compiler->eeGetMDArrayLowerBoundOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R(INS_sub, EA_4BYTE, tgtReg, indexReg, tmpReg);
offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R(INS_cmp, EA_4BYTE, tgtReg, tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL);
genProduceReg(arrIndex);
}
//------------------------------------------------------------------------
// genCodeForArrOffset: Generates code to compute the flattened array offset for
// one dimension of an array reference:
// result = (prevDimOffset * dimSize) + effectiveIndex
// where dimSize is obtained from the arrObj operand
//
// Arguments:
// arrOffset - the node for which we're generating code
//
// Return Value:
// None.
//
// Notes:
// dimSize and effectiveIndex are always non-negative, the former by design,
// and the latter because it has been normalized to be zero-based.
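// For example, for a rank-2 array with dimension lengths {3, 4}, the flattened offset of element [i, j]
// is computed as i * 4 + j (dimSize == 4 for the second dimension).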
void CodeGen::genCodeForArrOffset(GenTreeArrOffs* arrOffset)
{
GenTree* offsetNode = arrOffset->gtOffset;
GenTree* indexNode = arrOffset->gtIndex;
regNumber tgtReg = arrOffset->GetRegNum();
noway_assert(tgtReg != REG_NA);
if (!offsetNode->IsIntegralConst(0))
{
emitter* emit = GetEmitter();
regNumber offsetReg = genConsumeReg(offsetNode);
regNumber indexReg = genConsumeReg(indexNode);
regNumber arrReg = genConsumeReg(arrOffset->gtArrObj);
noway_assert(offsetReg != REG_NA);
noway_assert(indexReg != REG_NA);
noway_assert(arrReg != REG_NA);
regNumber tmpReg = arrOffset->GetSingleTempReg();
unsigned dim = arrOffset->gtCurrDim;
unsigned rank = arrOffset->gtArrRank;
var_types elemType = arrOffset->gtArrElemType;
unsigned offset = compiler->eeGetMDArrayLengthOffset(rank, dim);
// Load tmpReg with the dimension size and evaluate
// tgtReg = offsetReg*tmpReg + indexReg.
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, arrReg, offset);
emit->emitIns_R_R_R_R(INS_MULADD, EA_PTRSIZE, tgtReg, tmpReg, offsetReg, indexReg);
}
else
{
regNumber indexReg = genConsumeReg(indexNode);
inst_Mov(TYP_INT, tgtReg, indexReg, /* canSkip */ true);
}
genProduceReg(arrOffset);
}
//------------------------------------------------------------------------
// genCodeForShift: Generates the code sequence for a GenTree node that
// represents a bit shift or rotate operation (<<, >>, >>>, rol, ror).
//
// Arguments:
// tree - the bit shift node (that specifies the type of bit shift to perform).
//
// Assumptions:
// a) All GenTrees are register allocated.
//
void CodeGen::genCodeForShift(GenTree* tree)
{
var_types targetType = tree->TypeGet();
genTreeOps oper = tree->OperGet();
instruction ins = genGetInsForOper(oper, targetType);
emitAttr size = emitActualTypeSize(tree);
regNumber dstReg = tree->GetRegNum();
assert(dstReg != REG_NA);
genConsumeOperands(tree->AsOp());
GenTree* operand = tree->gtGetOp1();
GenTree* shiftBy = tree->gtGetOp2();
if (!shiftBy->IsCnsIntOrI())
{
GetEmitter()->emitIns_R_R_R(ins, size, dstReg, operand->GetRegNum(), shiftBy->GetRegNum());
}
else
{
unsigned immWidth = emitter::getBitWidth(size); // For ARM64, immWidth will be set to 32 or 64
unsigned shiftByImm = (unsigned)shiftBy->AsIntCon()->gtIconVal & (immWidth - 1);
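// For example, with a 32-bit operand the immediate is masked to the range [0, 31], so a requested shift by 33
// is encoded as a shift by 1.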
GetEmitter()->emitIns_R_R_I(ins, size, dstReg, operand->GetRegNum(), shiftByImm);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForLclAddr: Generates the code for GT_LCL_FLD_ADDR/GT_LCL_VAR_ADDR.
//
// Arguments:
// lclAddrNode - the node.
//
void CodeGen::genCodeForLclAddr(GenTreeLclVarCommon* lclAddrNode)
{
assert(lclAddrNode->OperIs(GT_LCL_FLD_ADDR, GT_LCL_VAR_ADDR));
var_types targetType = lclAddrNode->TypeGet();
emitAttr size = emitTypeSize(targetType);
regNumber targetReg = lclAddrNode->GetRegNum();
// Address of a local var.
noway_assert((targetType == TYP_BYREF) || (targetType == TYP_I_IMPL));
GetEmitter()->emitIns_R_S(INS_lea, size, targetReg, lclAddrNode->GetLclNum(), lclAddrNode->GetLclOffs());
genProduceReg(lclAddrNode);
}
//------------------------------------------------------------------------
// genCodeForLclFld: Produce code for a GT_LCL_FLD node.
//
// Arguments:
// tree - the GT_LCL_FLD node
//
void CodeGen::genCodeForLclFld(GenTreeLclFld* tree)
{
assert(tree->OperIs(GT_LCL_FLD));
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
emitter* emit = GetEmitter();
NYI_IF(targetType == TYP_STRUCT, "GT_LCL_FLD: struct load local field not supported");
assert(targetReg != REG_NA);
unsigned offs = tree->GetLclOffs();
unsigned varNum = tree->GetLclNum();
assert(varNum < compiler->lvaCount);
#ifdef TARGET_ARM
if (tree->IsOffsetMisaligned())
{
// Arm supports unaligned access only for integer types, so
// load the floating-point data into 1 or 2 integer registers and then move the bits into a float register.
regNumber addr = tree->ExtractTempReg();
emit->emitIns_R_S(INS_lea, EA_PTRSIZE, addr, varNum, offs);
if (targetType == TYP_FLOAT)
{
regNumber floatAsInt = tree->GetSingleTempReg();
emit->emitIns_R_R(INS_ldr, EA_4BYTE, floatAsInt, addr);
emit->emitIns_Mov(INS_vmov_i2f, EA_4BYTE, targetReg, floatAsInt, /* canSkip */ false);
}
else
{
regNumber halfdoubleAsInt1 = tree->ExtractTempReg();
regNumber halfdoubleAsInt2 = tree->GetSingleTempReg();
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt1, addr, 0);
emit->emitIns_R_R_I(INS_ldr, EA_4BYTE, halfdoubleAsInt2, addr, 4);
emit->emitIns_R_R_R(INS_vmov_i2d, EA_8BYTE, targetReg, halfdoubleAsInt1, halfdoubleAsInt2);
}
}
else
#endif // TARGET_ARM
{
emitAttr attr = emitActualTypeSize(targetType);
instruction ins = ins_Load(targetType);
emit->emitIns_R_S(ins, attr, targetReg, varNum, offs);
}
genProduceReg(tree);
}
//------------------------------------------------------------------------
// genCodeForIndexAddr: Produce code for a GT_INDEX_ADDR node.
//
// Arguments:
// tree - the GT_INDEX_ADDR node
//
void CodeGen::genCodeForIndexAddr(GenTreeIndexAddr* node)
{
GenTree* const base = node->Arr();
GenTree* const index = node->Index();
genConsumeReg(base);
genConsumeReg(index);
// NOTE: `genConsumeReg` marks the consumed register as not a GC pointer, as it assumes that the input registers
// die at the first instruction generated by the node. This is not the case for `INDEX_ADDR`, however, as the
// base register is multiply-used. As such, we need to mark the base register as containing a GC pointer until
// we are finished generating the code for this node.
gcInfo.gcMarkRegPtrVal(base->GetRegNum(), base->TypeGet());
assert(!varTypeIsGC(index->TypeGet()));
// The index is never contained, even if it is a constant.
assert(index->isUsedFromReg());
const regNumber tmpReg = node->GetSingleTempReg();
// Generate the bounds check if necessary.
if ((node->gtFlags & GTF_INX_RNGCHK) != 0)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, base->GetRegNum(), node->gtLenOffset);
GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(index->TypeGet()), index->GetRegNum(), tmpReg);
genJumpToThrowHlpBlk(EJ_hs, SCK_RNGCHK_FAIL, node->gtIndRngFailBB);
}
// Can we use a ScaledAdd instruction?
//
if (isPow2(node->gtElemSize) && (node->gtElemSize <= 32768))
{
DWORD scale;
BitScanForward(&scale, node->gtElemSize);
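// For example, with gtElemSize == 8, 'scale' is 3 and the address is computed as base + (index << 3).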
// dest = base + index * scale
genScaledAdd(emitActualTypeSize(node), node->GetRegNum(), base->GetRegNum(), index->GetRegNum(), scale);
}
else // we have to load the element size and use a MADD (multiply-add) instruction
{
// tmpReg = element size
instGen_Set_Reg_To_Imm(EA_4BYTE, tmpReg, (ssize_t)node->gtElemSize);
// dest = index * tmpReg + base
GetEmitter()->emitIns_R_R_R_R(INS_MULADD, emitActualTypeSize(node), node->GetRegNum(), index->GetRegNum(),
tmpReg, base->GetRegNum());
}
// dest = dest + elemOffs
GetEmitter()->emitIns_R_R_I(INS_add, emitActualTypeSize(node), node->GetRegNum(), node->GetRegNum(),
node->gtElemOffset);
gcInfo.gcMarkRegSetNpt(base->gtGetRegMask());
genProduceReg(node);
}
//------------------------------------------------------------------------
// genCodeForIndir: Produce code for a GT_IND node.
//
// Arguments:
// tree - the GT_IND node
//
void CodeGen::genCodeForIndir(GenTreeIndir* tree)
{
assert(tree->OperIs(GT_IND));
#ifdef FEATURE_SIMD
// Handling of Vector3 type values loaded through indirection.
if (tree->TypeGet() == TYP_SIMD12)
{
genLoadIndTypeSIMD12(tree);
return;
}
#endif // FEATURE_SIMD
var_types type = tree->TypeGet();
instruction ins = ins_Load(type);
regNumber targetReg = tree->GetRegNum();
genConsumeAddress(tree->Addr());
bool emitBarrier = false;
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
#ifdef TARGET_ARM64
bool addrIsInReg = tree->Addr()->isUsedFromReg();
bool addrIsAligned = ((tree->gtFlags & GTF_IND_UNALIGNED) == 0);
// on arm64-v8.3+ we can use ldap* instructions with acquire/release semantics to avoid
// full memory barriers if mixed with STLR
bool hasRcpc = compiler->compOpportunisticallyDependsOn(InstructionSet_Rcpc);
if ((ins == INS_ldrb) && addrIsInReg)
{
ins = hasRcpc ? INS_ldaprb : INS_ldarb;
}
else if ((ins == INS_ldrh) && addrIsInReg && addrIsAligned)
{
ins = hasRcpc ? INS_ldaprh : INS_ldarh;
}
else if ((ins == INS_ldr) && addrIsInReg && addrIsAligned && genIsValidIntReg(targetReg))
{
ins = hasRcpc ? INS_ldapr : INS_ldar;
}
else
#endif // TARGET_ARM64
{
emitBarrier = true;
}
}
GetEmitter()->emitInsLoadStoreOp(ins, emitActualTypeSize(type), targetReg, tree);
if (emitBarrier)
{
// when INS_ldar* could not be used for a volatile load,
// we use an ordinary load followed by a load barrier.
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
genProduceReg(tree);
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkHelper - Generate code for a CpBlk node by the means of the VM memcpy helper call
//
// Arguments:
// cpBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForCpBlkHelper(GenTreeBlk* cpBlkNode)
{
// Destination address goes in arg0, source address goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
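// The helper behaves like memcpy(dst, src, size).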
genConsumeBlockOp(cpBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
genEmitHelperCall(CORINFO_HELP_MEMCPY, 0, EA_UNKNOWN);
if (cpBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
#ifdef TARGET_ARM64
// The following classes
// - InitBlockUnrollHelper
// - CopyBlockUnrollHelper
// encapsulate algorithms that produce instruction sequences for inlined equivalents of memset() and memcpy() functions.
//
// Each class has a private template function that accepts an "InstructionStream" as a template class argument:
// - InitBlockUnrollHelper::UnrollInitBlock<InstructionStream>(startDstOffset, byteCount, initValue)
// - CopyBlockUnrollHelper::UnrollCopyBlock<InstructionStream>(startSrcOffset, startDstOffset, byteCount)
//
// The design goal is to separate optimization approaches implemented by the algorithms
// from the target platform specific details.
//
// InstructionStream is a "stream" of load/store instructions (i.e. ldr/ldp/str/stp) that represents an instruction
// sequence that will initialize a memory region with some value or copy values from one memory region to another.
//
// As far as UnrollInitBlock and UnrollCopyBlock are concerned, InstructionStream implements the following class member
// functions:
// - LoadPairRegs(offset, regSizeBytes)
// - StorePairRegs(offset, regSizeBytes)
// - LoadReg(offset, regSizeBytes)
// - StoreReg(offset, regSizeBytes)
//
// There are three implementations of InstructionStream:
// - CountingStream that counts how many instructions were pushed out of the stream
// - VerifyingStream that validates that all the instructions in the stream are encodable on Arm64
// - ProducingStream that maps the function to corresponding emitter functions
//
// The idea behind the design is that the decision regarding which instruction sequence to emit
// (scalar instructions vs. SIMD instructions) is made by executing the algorithm that produces an instruction
// sequence while counting the number of produced instructions and verifying that all of them are encodable.
//
// For example, using SIMD instructions might produce a shorter sequence but require "spilling" the value of a
// starting address to an integer register (due to stricter offset alignment rules for 16-byte wide SIMD
// instructions). CodeGen can take this fact into account before committing to a particular instruction sequence.
//
// An alternative design might have fused VerifyingStream and ProducingStream into one class
// that would allow undoing an instruction if the sequence turned out not to be fully encodable.
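// A typical caller (see genCodeForInitBlkUnroll and genCodeForCpBlkUnroll below) first runs the unroll algorithm
// against CountingStream/VerifyingStream instances to choose between REGSIZE_BYTES and FP_REGSIZE_BYTES and to
// check that every offset is encodable, and only then replays the same algorithm through a ProducingStream.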
class CountingStream
{
public:
CountingStream()
{
instrCount = 0;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instrCount++;
}
unsigned InstructionCount() const
{
return instrCount;
}
private:
unsigned instrCount;
};
class VerifyingStream
{
public:
VerifyingStream()
{
canEncodeAllLoads = true;
canEncodeAllStores = true;
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads = canEncodeAllLoads && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::canEncodeLoadOrStorePairOffset(offset, EA_SIZE(regSizeBytes));
}
void LoadReg(int offset, unsigned regSizeBytes)
{
canEncodeAllLoads =
canEncodeAllLoads && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
void StoreReg(int offset, unsigned regSizeBytes)
{
canEncodeAllStores =
canEncodeAllStores && emitter::emitIns_valid_imm_for_ldst_offset(offset, EA_SIZE(regSizeBytes));
}
bool CanEncodeAllLoads() const
{
return canEncodeAllLoads;
}
bool CanEncodeAllStores() const
{
return canEncodeAllStores;
}
private:
bool canEncodeAllLoads;
bool canEncodeAllStores;
};
class ProducingStreamBaseInstrs
{
public:
ProducingStreamBaseInstrs(regNumber intReg1, regNumber intReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), intReg2(intReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert(regSizeBytes == 8);
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), intReg1, intReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), intReg1, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber intReg2;
const regNumber addrReg;
emitter* const emitter;
};
class ProducingStream
{
public:
ProducingStream(regNumber intReg1, regNumber simdReg1, regNumber simdReg2, regNumber addrReg, emitter* emitter)
: intReg1(intReg1), simdReg1(simdReg1), simdReg2(simdReg2), addrReg(addrReg), emitter(emitter)
{
}
void LoadPairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_ldp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void StorePairRegs(int offset, unsigned regSizeBytes)
{
assert((regSizeBytes == 8) || (regSizeBytes == 16));
emitter->emitIns_R_R_R_I(INS_stp, EA_SIZE(regSizeBytes), simdReg1, simdReg2, addrReg, offset);
}
void LoadReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_ldr;
// Note that 'intReg1' can be unavailable.
// If that is the case, then use SIMD instruction ldr and
// 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_ldrb;
}
else if (regSizeBytes == 2)
{
ins = INS_ldrh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
void StoreReg(int offset, unsigned regSizeBytes)
{
instruction ins = INS_str;
// Note that 'intReg1' can be unavailable.
// If that is the case, then use the SIMD str instruction with
// 'simdReg1' as a temporary register.
regNumber tempReg;
if ((regSizeBytes == 16) || (intReg1 == REG_NA))
{
tempReg = simdReg1;
}
else
{
tempReg = intReg1;
if (regSizeBytes == 1)
{
ins = INS_strb;
}
else if (regSizeBytes == 2)
{
ins = INS_strh;
}
}
emitter->emitIns_R_R_I(ins, EA_SIZE(regSizeBytes), tempReg, addrReg, offset);
}
private:
const regNumber intReg1;
const regNumber simdReg1;
const regNumber simdReg2;
const regNumber addrReg;
emitter* const emitter;
};
class BlockUnrollHelper
{
public:
// The following function returns a size in bytes that
// 1) is greater than or equal to 'byteCount' and
// 2) can be read or written by a single instruction on Arm64.
// For example, the Arm64 ISA has ldrb/strb and ldrh/strh that
// load/store 1 or 2 bytes, respectively.
// However, there are no instructions that can load/store 3 bytes and
// the next "smallest" instruction is ldr/str that operates on 4-byte granularity.
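// For example: byteCount 1 -> 1, 2 -> 2, 3..4 -> 4, 5..8 -> 8, and 9..15 -> 16.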
static unsigned GetRegSizeAtLeastBytes(unsigned byteCount)
{
assert(byteCount != 0);
assert(byteCount < 16);
unsigned regSizeBytes = byteCount;
if (byteCount > 8)
{
regSizeBytes = 16;
}
else if (byteCount > 4)
{
regSizeBytes = 8;
}
else if (byteCount > 2)
{
regSizeBytes = 4;
}
return regSizeBytes;
}
};
class InitBlockUnrollHelper
{
public:
InitBlockUnrollHelper(int dstOffset, unsigned byteCount) : dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
VerifyingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.CanEncodeAllStores();
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollInitBlock(instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
void Unroll(regNumber intReg, regNumber simdReg, regNumber addrReg, emitter* emitter) const
{
ProducingStream instrStream(intReg, simdReg, simdReg, addrReg, emitter);
UnrollInitBlock(instrStream, FP_REGSIZE_BYTES);
}
void UnrollBaseInstrs(regNumber intReg, regNumber addrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs instrStream(intReg, intReg, addrReg, emitter);
UnrollInitBlock(instrStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollInitBlock(InstructionStream& instrStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
int offset = dstStartOffset;
const int endOffset = offset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int offsetAligned = AlignUp((UINT)offset, storePairRegsAlignment);
const int storePairRegsInstrCount = (endOffset - offsetAligned) / storePairRegsWritesBytes;
if (storePairRegsInstrCount > 0)
{
if (offset != offsetAligned)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(offsetAligned - offset);
instrStream.StoreReg(offset, firstRegSizeBytes);
offset = offsetAligned;
}
while (endOffset - offset >= storePairRegsWritesBytes)
{
instrStream.StorePairRegs(offset, initialRegSizeBytes);
offset += storePairRegsWritesBytes;
}
if (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
}
if (offset != endOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endOffset - offset >= initialRegSizeBytes)
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endOffset - offset < initialRegSizeBytes);
while (offset != endOffset)
{
if (isSafeToWriteBehind)
{
assert(endOffset - offset < initialRegSizeBytes);
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endOffset - offset);
instrStream.StoreReg(endOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (offset + initialRegSizeBytes > endOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
instrStream.StoreReg(offset, initialRegSizeBytes);
offset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int dstStartOffset;
const unsigned byteCount;
};
class CopyBlockUnrollHelper
{
public:
CopyBlockUnrollHelper(int srcOffset, int dstOffset, unsigned byteCount)
: srcStartOffset(srcOffset), dstStartOffset(dstOffset), byteCount(byteCount)
{
}
int GetSrcOffset() const
{
return srcStartOffset;
}
int GetDstOffset() const
{
return dstStartOffset;
}
void SetSrcOffset(int srcOffset)
{
srcStartOffset = srcOffset;
}
void SetDstOffset(int dstOffset)
{
dstStartOffset = dstOffset;
}
unsigned InstructionCount(int regSizeBytes) const
{
CountingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
return instrStream.InstructionCount();
}
bool CanEncodeAllOffsets(int regSizeBytes) const
{
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
TryEncodeAllOffsets(regSizeBytes, &canEncodeAllLoads, &canEncodeAllStores);
return canEncodeAllLoads && canEncodeAllStores;
}
void TryEncodeAllOffsets(int regSizeBytes, bool* pCanEncodeAllLoads, bool* pCanEncodeAllStores) const
{
assert(pCanEncodeAllLoads != nullptr);
assert(pCanEncodeAllStores != nullptr);
VerifyingStream instrStream;
UnrollCopyBlock(instrStream, instrStream, regSizeBytes);
*pCanEncodeAllLoads = instrStream.CanEncodeAllLoads();
*pCanEncodeAllStores = instrStream.CanEncodeAllStores();
}
void Unroll(unsigned initialRegSizeBytes,
regNumber intReg,
regNumber simdReg1,
regNumber simdReg2,
regNumber srcAddrReg,
regNumber dstAddrReg,
emitter* emitter) const
{
ProducingStream loadStream(intReg, simdReg1, simdReg2, srcAddrReg, emitter);
ProducingStream storeStream(intReg, simdReg1, simdReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, initialRegSizeBytes);
}
void UnrollBaseInstrs(
regNumber intReg1, regNumber intReg2, regNumber srcAddrReg, regNumber dstAddrReg, emitter* emitter) const
{
ProducingStreamBaseInstrs loadStream(intReg1, intReg2, srcAddrReg, emitter);
ProducingStreamBaseInstrs storeStream(intReg1, intReg2, dstAddrReg, emitter);
UnrollCopyBlock(loadStream, storeStream, REGSIZE_BYTES);
}
private:
template <class InstructionStream>
void UnrollCopyBlock(InstructionStream& loadStream, InstructionStream& storeStream, int initialRegSizeBytes) const
{
assert((initialRegSizeBytes == 8) || (initialRegSizeBytes == 16));
int srcOffset = srcStartOffset;
int dstOffset = dstStartOffset;
const int endSrcOffset = srcOffset + byteCount;
const int endDstOffset = dstOffset + byteCount;
const int storePairRegsAlignment = initialRegSizeBytes;
const int storePairRegsWritesBytes = 2 * initialRegSizeBytes;
const int dstOffsetAligned = AlignUp((UINT)dstOffset, storePairRegsAlignment);
if (byteCount >= (unsigned)storePairRegsWritesBytes)
{
const int dstBytesToAlign = dstOffsetAligned - dstOffset;
if (dstBytesToAlign != 0)
{
const int firstRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(dstBytesToAlign);
loadStream.LoadReg(srcOffset, firstRegSizeBytes);
storeStream.StoreReg(dstOffset, firstRegSizeBytes);
srcOffset = srcOffset + dstBytesToAlign;
dstOffset = dstOffsetAligned;
}
while (endDstOffset - dstOffset >= storePairRegsWritesBytes)
{
loadStream.LoadPairRegs(srcOffset, initialRegSizeBytes);
storeStream.StorePairRegs(dstOffset, initialRegSizeBytes);
srcOffset += storePairRegsWritesBytes;
dstOffset += storePairRegsWritesBytes;
}
if (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
}
if (dstOffset != endDstOffset)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
}
}
else
{
bool isSafeToWriteBehind = false;
while (endDstOffset - dstOffset >= initialRegSizeBytes)
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
assert(endSrcOffset - srcOffset < initialRegSizeBytes);
while (dstOffset != endDstOffset)
{
if (isSafeToWriteBehind)
{
const int lastRegSizeBytes = BlockUnrollHelper::GetRegSizeAtLeastBytes(endDstOffset - dstOffset);
loadStream.LoadReg(endSrcOffset - lastRegSizeBytes, lastRegSizeBytes);
storeStream.StoreReg(endDstOffset - lastRegSizeBytes, lastRegSizeBytes);
break;
}
if (dstOffset + initialRegSizeBytes > endDstOffset)
{
initialRegSizeBytes = initialRegSizeBytes / 2;
}
else
{
loadStream.LoadReg(srcOffset, initialRegSizeBytes);
storeStream.StoreReg(dstOffset, initialRegSizeBytes);
srcOffset += initialRegSizeBytes;
dstOffset += initialRegSizeBytes;
isSafeToWriteBehind = true;
}
}
}
}
int srcStartOffset;
int dstStartOffset;
const unsigned byteCount;
};
#endif // TARGET_ARM64
//----------------------------------------------------------------------------------
// genCodeForInitBlkUnroll: Generate unrolled block initialization code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForInitBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
GenTree* src = node->Data();
if (src->OperIs(GT_INIT_VAL))
{
assert(src->isContained());
src = src->gtGetOp1();
}
if (node->IsVolatile())
{
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
InitBlockUnrollHelper helper(dstOffset, size);
regNumber srcReg;
if (!src->isContained())
{
srcReg = genConsumeReg(src);
}
else
{
assert(src->IsIntegralConst(0));
srcReg = REG_ZR;
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
if (!helper.CanEncodeAllOffsets(REGSIZE_BYTES))
{
// If dstRegAddrAlignment is known and non-zero, the following ensures that the adjusted value of dstReg is at a
// 16-byte aligned boundary.
// This is done to potentially allow more cases where the JIT can use 16-byte stores.
const int dstOffsetAdjustment = helper.GetDstOffset() - dstRegAddrAlignment;
dstRegAddrAlignment = 0;
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
helper.SetDstOffset(helper.GetDstOffset() - dstOffsetAdjustment);
}
bool shouldUse16ByteWideInstrs = false;
// Store operations that cross a 16-byte boundary can reduce bandwidth or incur additional latency.
// The following condition prevents using 16-byte stores when dstRegAddrAlignment is:
// 1) unknown (i.e. dstReg is neither FP nor SP) or
// 2) non-zero (i.e. dstRegAddr is not 16-byte aligned).
const bool hasAvailableSimdReg = isDstRegAddrAlignmentKnown && (size > FP_REGSIZE_BYTES);
const bool canUse16ByteWideInstrs =
hasAvailableSimdReg && (dstRegAddrAlignment == 0) && helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES);
if (canUse16ByteWideInstrs)
{
// The JIT would need to initialize a SIMD register with "movi simdReg.16B, #initValue".
const unsigned instrCount16ByteWide = helper.InstructionCount(FP_REGSIZE_BYTES) + 1;
shouldUse16ByteWideInstrs = instrCount16ByteWide < helper.InstructionCount(REGSIZE_BYTES);
}
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg = node->GetSingleTempReg(RBM_ALLFLOAT);
const int initValue = (src->AsIntCon()->IconValue() & 0xFF);
emit->emitIns_R_I(INS_movi, EA_16BYTE, simdReg, initValue, INS_OPTS_16B);
helper.Unroll(srcReg, simdReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
const regNumber srcReg = genConsumeReg(src);
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, dstOffset += regSize)
{
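// Narrow the store width to the remaining size; e.g. with 7 bytes left the loop emits a 4-byte str,
// then a 2-byte strh, then a 1-byte strb on successive iterations.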
while (regSize > size)
{
regSize /= 2;
}
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, srcReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, srcReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
}
//----------------------------------------------------------------------------------
// genCodeForCpBlkUnroll: Generate unrolled block copy code.
//
// Arguments:
// node - the GT_STORE_BLK node to generate code for
//
void CodeGen::genCodeForCpBlkUnroll(GenTreeBlk* node)
{
assert(node->OperIs(GT_STORE_BLK));
unsigned dstLclNum = BAD_VAR_NUM;
regNumber dstAddrBaseReg = REG_NA;
int dstOffset = 0;
GenTree* dstAddr = node->Addr();
if (!dstAddr->isContained())
{
dstAddrBaseReg = genConsumeReg(dstAddr);
}
else if (dstAddr->OperIsAddrMode())
{
assert(!dstAddr->AsAddrMode()->HasIndex());
dstAddrBaseReg = genConsumeReg(dstAddr->AsAddrMode()->Base());
dstOffset = dstAddr->AsAddrMode()->Offset();
}
else
{
assert(dstAddr->OperIsLocalAddr());
dstLclNum = dstAddr->AsLclVarCommon()->GetLclNum();
dstOffset = dstAddr->AsLclVarCommon()->GetLclOffs();
}
unsigned srcLclNum = BAD_VAR_NUM;
regNumber srcAddrBaseReg = REG_NA;
int srcOffset = 0;
GenTree* src = node->Data();
assert(src->isContained());
if (src->OperIs(GT_LCL_VAR, GT_LCL_FLD))
{
srcLclNum = src->AsLclVarCommon()->GetLclNum();
srcOffset = src->AsLclVarCommon()->GetLclOffs();
}
else
{
assert(src->OperIs(GT_IND));
GenTree* srcAddr = src->AsIndir()->Addr();
if (!srcAddr->isContained())
{
srcAddrBaseReg = genConsumeReg(srcAddr);
}
else if (srcAddr->OperIsAddrMode())
{
srcAddrBaseReg = genConsumeReg(srcAddr->AsAddrMode()->Base());
srcOffset = srcAddr->AsAddrMode()->Offset();
}
else
{
assert(srcAddr->OperIsLocalAddr());
srcLclNum = srcAddr->AsLclVarCommon()->GetLclNum();
srcOffset = srcAddr->AsLclVarCommon()->GetLclOffs();
}
}
if (node->IsVolatile())
{
// issue a full memory barrier before a volatile CpBlk operation
instGen_MemoryBarrier();
}
emitter* emit = GetEmitter();
unsigned size = node->GetLayout()->GetSize();
assert(size <= INT32_MAX);
assert(srcOffset < INT32_MAX - static_cast<int>(size));
assert(dstOffset < INT32_MAX - static_cast<int>(size));
#ifdef TARGET_ARM64
CopyBlockUnrollHelper helper(srcOffset, dstOffset, size);
regNumber srcReg = srcAddrBaseReg;
int srcRegAddrAlignment = 0;
bool isSrcRegAddrAlignmentKnown = false;
if (srcLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(srcLclNum, &fpBased);
srcReg = fpBased ? REG_FPBASE : REG_SPBASE;
srcRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isSrcRegAddrAlignmentKnown = true;
helper.SetSrcOffset(baseAddr + srcOffset);
}
regNumber dstReg = dstAddrBaseReg;
int dstRegAddrAlignment = 0;
bool isDstRegAddrAlignmentKnown = false;
if (dstLclNum != BAD_VAR_NUM)
{
bool fpBased;
const int baseAddr = compiler->lvaFrameAddress(dstLclNum, &fpBased);
dstReg = fpBased ? REG_FPBASE : REG_SPBASE;
dstRegAddrAlignment = fpBased ? (genSPtoFPdelta() % 16) : 0;
isDstRegAddrAlignmentKnown = true;
helper.SetDstOffset(baseAddr + dstOffset);
}
bool canEncodeAllLoads = true;
bool canEncodeAllStores = true;
helper.TryEncodeAllOffsets(REGSIZE_BYTES, &canEncodeAllLoads, &canEncodeAllStores);
srcOffset = helper.GetSrcOffset();
dstOffset = helper.GetDstOffset();
int srcOffsetAdjustment = 0;
int dstOffsetAdjustment = 0;
if (!canEncodeAllLoads && !canEncodeAllStores)
{
srcOffsetAdjustment = srcOffset;
dstOffsetAdjustment = dstOffset;
}
else if (!canEncodeAllLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else if (!canEncodeAllStores)
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
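// For example, if the loads at srcOffset cannot be encoded but the stores at dstOffset can, srcOffsetAdjustment
// becomes (srcOffset - dstOffset); srcReg is advanced by that amount further below, so the loads end up reusing
// the (encodable) dstOffset values.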
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
// Quad-word load operations that are not 16-byte aligned, and store operations that cross a 16-byte boundary
// can reduce bandwidth or incur additional latency.
// Therefore, the JIT would attempt to use 16-byte variants of such instructions when both conditions are met:
// 1) the base address stored in dstReg has known alignment (modulo 16 bytes) and
// 2) the base address stored in srcReg has the same alignment as the address in dstReg.
//
// When both addresses are 16-byte aligned the CopyBlock instruction sequence looks like
//
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #dstOffset+32]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+32]
// ...
//
// When both addresses are not 16-byte aligned, the CopyBlock instruction sequence starts with a padding
// str instruction. For example, when both addresses are 8-byte aligned the instruction sequence looks like
//
// ldr X_intReg1, [srcReg, #srcOffset]
// str X_intReg1, [dstReg, #dstOffset]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+8]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+8]
// ldp Q_simdReg1, Q_simdReg2, [srcReg, #srcOffset+40]
// stp Q_simdReg1, Q_simdReg2, [dstReg, #dstOffset+40]
// ...
// LSRA allocates a pair of SIMD registers when alignments of both source and destination base addresses are
// known and the block size is larger than a single SIMD register size (i.e. when using SIMD instructions can
// be profitable).
const bool canUse16ByteWideInstrs = isSrcRegAddrAlignmentKnown && isDstRegAddrAlignmentKnown &&
(size >= 2 * FP_REGSIZE_BYTES) && (srcRegAddrAlignment == dstRegAddrAlignment);
bool shouldUse16ByteWideInstrs = false;
if (canUse16ByteWideInstrs)
{
bool canEncodeAll16ByteWideLoads = false;
bool canEncodeAll16ByteWideStores = false;
helper.TryEncodeAllOffsets(FP_REGSIZE_BYTES, &canEncodeAll16ByteWideLoads, &canEncodeAll16ByteWideStores);
if (canEncodeAll16ByteWideLoads && canEncodeAll16ByteWideStores)
{
// No further adjustments for srcOffset and dstOffset are needed.
// The JIT should use 16-byte loads and stores when the resulting sequence has fewer instructions.
shouldUse16ByteWideInstrs =
(helper.InstructionCount(FP_REGSIZE_BYTES) < helper.InstructionCount(REGSIZE_BYTES));
}
else if (canEncodeAllLoads && canEncodeAllStores &&
(canEncodeAll16ByteWideLoads || canEncodeAll16ByteWideStores))
{
// In order to use 16-byte instructions the JIT needs to adjust either srcOffset or dstOffset.
// The JIT should use 16-byte loads and stores when the resulting sequence (incl. an additional add
// instruction) has fewer instructions.
if (helper.InstructionCount(FP_REGSIZE_BYTES) + 1 < helper.InstructionCount(REGSIZE_BYTES))
{
shouldUse16ByteWideInstrs = true;
if (!canEncodeAll16ByteWideLoads)
{
srcOffsetAdjustment = srcOffset - dstOffset;
}
else
{
dstOffsetAdjustment = dstOffset - srcOffset;
}
helper.SetSrcOffset(srcOffset - srcOffsetAdjustment);
helper.SetDstOffset(dstOffset - dstOffsetAdjustment);
}
}
}
#ifdef DEBUG
if (shouldUse16ByteWideInstrs)
{
assert(helper.CanEncodeAllOffsets(FP_REGSIZE_BYTES));
}
else
{
assert(helper.CanEncodeAllOffsets(REGSIZE_BYTES));
}
#endif
if ((srcOffsetAdjustment != 0) && (dstOffsetAdjustment != 0))
{
const regNumber tempReg1 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg1, srcReg, srcOffsetAdjustment, tempReg1);
srcReg = tempReg1;
const regNumber tempReg2 = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg2, dstReg, dstOffsetAdjustment, tempReg2);
dstReg = tempReg2;
}
else if (srcOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, srcReg, srcOffsetAdjustment, tempReg);
srcReg = tempReg;
}
else if (dstOffsetAdjustment != 0)
{
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
genInstrWithConstant(INS_add, EA_PTRSIZE, tempReg, dstReg, dstOffsetAdjustment, tempReg);
dstReg = tempReg;
}
regNumber intReg1 = REG_NA;
regNumber intReg2 = REG_NA;
const unsigned intRegCount = node->AvailableTempRegCount(RBM_ALLINT);
if (intRegCount >= 2)
{
intReg1 = node->ExtractTempReg(RBM_ALLINT);
intReg2 = node->ExtractTempReg(RBM_ALLINT);
}
else if (intRegCount == 1)
{
intReg1 = node->GetSingleTempReg(RBM_ALLINT);
intReg2 = rsGetRsvdReg();
}
else
{
intReg1 = rsGetRsvdReg();
}
if (shouldUse16ByteWideInstrs)
{
const regNumber simdReg1 = node->ExtractTempReg(RBM_ALLFLOAT);
const regNumber simdReg2 = node->GetSingleTempReg(RBM_ALLFLOAT);
helper.Unroll(FP_REGSIZE_BYTES, intReg1, simdReg1, simdReg2, srcReg, dstReg, GetEmitter());
}
else
{
helper.UnrollBaseInstrs(intReg1, intReg2, srcReg, dstReg, GetEmitter());
}
#endif // TARGET_ARM64
#ifdef TARGET_ARM
const regNumber tempReg = node->ExtractTempReg(RBM_ALLINT);
for (unsigned regSize = REGSIZE_BYTES; size > 0; size -= regSize, srcOffset += regSize, dstOffset += regSize)
{
while (regSize > size)
{
regSize /= 2;
}
instruction loadIns;
instruction storeIns;
emitAttr attr;
switch (regSize)
{
case 1:
loadIns = INS_ldrb;
storeIns = INS_strb;
attr = EA_4BYTE;
break;
case 2:
loadIns = INS_ldrh;
storeIns = INS_strh;
attr = EA_4BYTE;
break;
case 4:
loadIns = INS_ldr;
storeIns = INS_str;
attr = EA_ATTR(regSize);
break;
default:
unreached();
}
if (srcLclNum != BAD_VAR_NUM)
{
emit->emitIns_R_S(loadIns, attr, tempReg, srcLclNum, srcOffset);
}
else
{
emit->emitIns_R_R_I(loadIns, attr, tempReg, srcAddrBaseReg, srcOffset);
}
if (dstLclNum != BAD_VAR_NUM)
{
emit->emitIns_S_R(storeIns, attr, tempReg, dstLclNum, dstOffset);
}
else
{
emit->emitIns_R_R_I(storeIns, attr, tempReg, dstAddrBaseReg, dstOffset);
}
}
#endif // TARGET_ARM
if (node->IsVolatile())
{
// issue a load barrier after a volatile CpBlk operation
instGen_MemoryBarrier(BARRIER_LOAD_ONLY);
}
}
//------------------------------------------------------------------------
// genCodeForInitBlkHelper - Generate code for an InitBlk node by the means of the VM memcpy helper call
//
// Arguments:
// initBlkNode - the GT_STORE_[BLK|OBJ|DYN_BLK]
//
// Preconditions:
// The register assignments have been set appropriately.
// This is validated by genConsumeBlockOp().
//
void CodeGen::genCodeForInitBlkHelper(GenTreeBlk* initBlkNode)
{
// Destination address goes in arg0, the fill value goes in arg1, and size goes in arg2.
// genConsumeBlockOp takes care of this for us.
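// The helper behaves like memset(dst, value, size).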
genConsumeBlockOp(initBlkNode, REG_ARG_0, REG_ARG_1, REG_ARG_2);
if (initBlkNode->gtFlags & GTF_BLK_VOLATILE)
{
// issue a full memory barrier before a volatile initBlock Operation
instGen_MemoryBarrier();
}
genEmitHelperCall(CORINFO_HELP_MEMSET, 0, EA_UNKNOWN);
}
//------------------------------------------------------------------------
// genCall: Produce code for a GT_CALL node
//
void CodeGen::genCall(GenTreeCall* call)
{
// Consume all the arg regs
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* argNode = use.GetNode();
fgArgTabEntry* curArgTabEntry = compiler->gtArgEntryByNode(call, argNode);
assert(curArgTabEntry);
// GT_RELOAD/GT_COPY use the child node
argNode = argNode->gtSkipReloadOrCopy();
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
// Deal with multi register passed struct args.
if (argNode->OperGet() == GT_FIELD_LIST)
{
regNumber argReg = curArgTabEntry->GetRegNum();
for (GenTreeFieldList::Use& use : argNode->AsFieldList()->Uses())
{
GenTree* putArgRegNode = use.GetNode();
assert(putArgRegNode->gtOper == GT_PUTARG_REG);
genConsumeReg(putArgRegNode);
inst_Mov_Extend(putArgRegNode->TypeGet(), /* srcInReg */ true, argReg, putArgRegNode->GetRegNum(),
/* canSkip */ true, emitActualTypeSize(TYP_I_IMPL));
argReg = genRegArgNext(argReg);
#if defined(TARGET_ARM)
// A double register is modelled as an even-numbered single one
if (putArgRegNode->TypeGet() == TYP_DOUBLE)
{
argReg = genRegArgNext(argReg);
}
#endif // TARGET_ARM
}
}
else if (curArgTabEntry->IsSplit())
{
assert(compFeatureArgSplit());
assert(curArgTabEntry->numRegs >= 1);
genConsumeArgSplitStruct(argNode->AsPutArgSplit());
for (unsigned idx = 0; idx < curArgTabEntry->numRegs; idx++)
{
regNumber argReg = (regNumber)((unsigned)curArgTabEntry->GetRegNum() + idx);
regNumber allocReg = argNode->AsPutArgSplit()->GetRegNumByIdx(idx);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, allocReg, /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
else
{
regNumber argReg = curArgTabEntry->GetRegNum();
genConsumeReg(argNode);
inst_Mov_Extend(argNode->TypeGet(), /* srcInReg */ true, argReg, argNode->GetRegNum(), /* canSkip */ true,
emitActualTypeSize(TYP_I_IMPL));
}
}
// Insert a null check on "this" pointer if asked.
if (call->NeedsNullCheck())
{
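// Emit a word-sized load from [this]; the load faults if 'this' is null. The loaded value is discarded
// (the zero register on Arm64, a temporary register on Arm32).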
const regNumber regThis = genGetThisArgReg(call);
#if defined(TARGET_ARM)
const regNumber tmpReg = call->ExtractTempReg();
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, tmpReg, regThis, 0);
#elif defined(TARGET_ARM64)
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_4BYTE, REG_ZR, regThis, 0);
#endif // TARGET*
}
// If this is a fast tail call, then we are done here; we just have to load the call
// target into the right registers. We ensure in RA that the target is loaded
// into a volatile register that won't be restored by the epilog sequence.
if (call->IsFastTailCall())
{
GenTree* target = getCallTarget(call, nullptr);
if (target != nullptr)
{
// Indirect fast tail calls materialize call target either in gtControlExpr or in gtCallAddr.
genConsumeReg(target);
}
#ifdef FEATURE_READYTORUN
else if (call->IsR2ROrVirtualStubRelativeIndir())
{
assert(((call->IsR2RRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_PVALUE)) ||
((call->IsVirtualStubRelativeIndir()) && (call->gtEntryPoint.accessType == IAT_VALUE)));
assert(call->gtControlExpr == nullptr);
regNumber tmpReg = call->GetSingleTempReg();
// The register in which we save the call address should not be overwritten by the epilog.
assert((tmpReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == tmpReg);
regNumber callAddrReg =
call->IsVirtualStubRelativeIndir() ? compiler->virtualStubParamInfo->GetReg() : REG_R2R_INDIRECT_PARAM;
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), tmpReg, callAddrReg);
// We will use this again when emitting the jump in genCallInstruction in the epilog
call->gtRsvdRegs |= genRegMask(tmpReg);
}
#endif
return;
}
// For a pinvoke to unmanaged code we emit a label to clear
// the GC pointer state before the callsite.
// We can't utilize the typical lazy killing of GC pointers
// at (or inside) the callsite.
if (compiler->killGCRefs(call))
{
genDefineTempLabel(genCreateTempLabel());
}
genCallInstruction(call);
// For pinvoke/intrinsic/tailcalls we may have needed to get the address of
// a label. In case the call is indirect with CFG enabled, make sure we do not get
// the address after the validation but only after the actual call that
// comes after it.
if (genPendingCallLabel && !call->IsHelperCall(compiler, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
genDefineInlineTempLabel(genPendingCallLabel);
genPendingCallLabel = nullptr;
}
#ifdef DEBUG
// We should not have GC pointers in killed registers live around the call.
// GC info for arg registers were cleared when consuming arg nodes above
// and LSRA should ensure it for other trashed registers.
regMaskTP killMask = RBM_CALLEE_TRASH;
if (call->IsHelperCall())
{
CorInfoHelpFunc helpFunc = compiler->eeGetHelperNum(call->gtCallMethHnd);
killMask = compiler->compHelperCallKillSet(helpFunc);
}
assert((gcInfo.gcRegGCrefSetCur & killMask) == 0);
assert((gcInfo.gcRegByrefSetCur & killMask) == 0);
#endif
var_types returnType = call->TypeGet();
if (returnType != TYP_VOID)
{
regNumber returnReg;
if (call->HasMultiRegRetVal())
{
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
assert(pRetTypeDesc != nullptr);
unsigned regCount = pRetTypeDesc->GetReturnRegCount();
        // If the regs allocated to the call node are different from the ABI return
        // regs in which the call has returned its result, move the result
        // to the regs allocated to the call node.
for (unsigned i = 0; i < regCount; ++i)
{
var_types regType = pRetTypeDesc->GetReturnRegType(i);
returnReg = pRetTypeDesc->GetABIReturnReg(i);
regNumber allocatedReg = call->GetRegNumByIdx(i);
inst_Mov(regType, allocatedReg, returnReg, /* canSkip */ true);
}
}
else
{
#ifdef TARGET_ARM
if (call->IsHelperCall(compiler, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
// The CORINFO_HELP_INIT_PINVOKE_FRAME helper uses a custom calling convention that returns with
// TCB in REG_PINVOKE_TCB. fgMorphCall() sets the correct argument registers.
returnReg = REG_PINVOKE_TCB;
}
else if (compiler->opts.compUseSoftFP)
{
returnReg = REG_INTRET;
}
else
#endif // TARGET_ARM
if (varTypeUsesFloatArgReg(returnType))
{
returnReg = REG_FLOATRET;
}
else
{
returnReg = REG_INTRET;
}
if (call->GetRegNum() != returnReg)
{
#ifdef TARGET_ARM
if (compiler->opts.compUseSoftFP && returnType == TYP_DOUBLE)
{
inst_RV_RV_RV(INS_vmov_i2d, call->GetRegNum(), returnReg, genRegArgNext(returnReg), EA_8BYTE);
}
else if (compiler->opts.compUseSoftFP && returnType == TYP_FLOAT)
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
else
#endif
{
inst_Mov(returnType, call->GetRegNum(), returnReg, /* canSkip */ false);
}
}
}
genProduceReg(call);
}
// If there is nothing next, that means the result is thrown away, so this value is not live.
// However, for minopts or debuggable code, we keep it live to support managed return value debugging.
if ((call->gtNext == nullptr) && !compiler->opts.MinOpts() && !compiler->opts.compDbgCode)
{
gcInfo.gcMarkRegSetNpt(RBM_INTRET);
}
}
//------------------------------------------------------------------------
// genCallInstruction - Generate instructions necessary to transfer control to the call.
//
// Arguments:
// call - the GT_CALL node
//
// Remarks:
// For tailcalls this function will generate a jump.
//
void CodeGen::genCallInstruction(GenTreeCall* call)
{
// Determine return value size(s).
const ReturnTypeDesc* pRetTypeDesc = call->GetReturnTypeDesc();
emitAttr retSize = EA_PTRSIZE;
emitAttr secondRetSize = EA_UNKNOWN;
if (call->HasMultiRegRetVal())
{
retSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(0));
secondRetSize = emitTypeSize(pRetTypeDesc->GetReturnRegType(1));
}
else
{
assert(call->gtType != TYP_STRUCT);
if (call->gtType == TYP_REF)
{
retSize = EA_GCREF;
}
else if (call->gtType == TYP_BYREF)
{
retSize = EA_BYREF;
}
}
DebugInfo di;
// We need to propagate the debug information to the call instruction, so we can emit
// an IL to native mapping record for the call, to support managed return value debugging.
// We don't want tail call helper calls that were converted from normal calls to get a record,
// so we skip this hash table lookup logic in that case.
if (compiler->opts.compDbgInfo && compiler->genCallSite2DebugInfoMap != nullptr && !call->IsTailCall())
{
(void)compiler->genCallSite2DebugInfoMap->Lookup(call, &di);
}
CORINFO_SIG_INFO* sigInfo = nullptr;
#ifdef DEBUG
// Pass the call signature information down into the emitter so the emitter can associate
// native call sites with the signatures they were generated from.
if (call->gtCallType != CT_HELPER)
{
sigInfo = call->callSig;
}
if (call->IsFastTailCall())
{
regMaskTP trashedByEpilog = RBM_CALLEE_SAVED;
// The epilog may use and trash REG_GSCOOKIE_TMP_0/1. Make sure we have no
// non-standard args that may be trash if this is a tailcall.
if (compiler->getNeedsGSSecurityCookie())
{
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_0);
trashedByEpilog |= genRegMask(REG_GSCOOKIE_TMP_1);
}
for (unsigned i = 0; i < call->fgArgInfo->ArgCount(); i++)
{
fgArgTabEntry* entry = call->fgArgInfo->GetArgEntry(i);
for (unsigned j = 0; j < entry->numRegs; j++)
{
regNumber reg = entry->GetRegNum(j);
if ((trashedByEpilog & genRegMask(reg)) != 0)
{
JITDUMP("Tail call node:\n");
DISPTREE(call);
JITDUMP("Register used: %s\n", getRegName(reg));
assert(!"Argument to tailcall may be trashed by epilog");
}
}
}
}
#endif // DEBUG
CORINFO_METHOD_HANDLE methHnd;
GenTree* target = getCallTarget(call, &methHnd);
if (target != nullptr)
{
        // A call target cannot be a contained indirection
assert(!target->isContainedIndir());
// For fast tailcall we have already consumed the target. We ensure in
// RA that the target was allocated into a volatile register that will
        // not be clobbered by the epilog sequence.
if (!call->IsFastTailCall())
{
genConsumeReg(target);
}
// We have already generated code for gtControlExpr evaluating it into a register.
// We just need to emit "call reg" in this case.
//
assert(genIsValidIntReg(target->GetRegNum()));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
target->GetRegNum(),
call->IsFastTailCall());
// clang-format on
}
else
{
// If we have no target and this is a call with indirection cell then
// we do an optimization where we load the call address directly from
// the indirection cell instead of duplicating the tree. In BuildCall
        // we ensure that we get an extra register for this purpose. Note that for
// CFG the call might have changed to
// CORINFO_HELP_DISPATCH_INDIRECT_CALL in which case we still have the
// indirection cell but we should not try to optimize.
regNumber callThroughIndirReg = REG_NA;
if (!call->IsHelperCall(compiler, CORINFO_HELP_DISPATCH_INDIRECT_CALL))
{
callThroughIndirReg = getCallIndirectionCellReg(call);
}
if (callThroughIndirReg != REG_NA)
{
assert(call->IsR2ROrVirtualStubRelativeIndir());
regNumber targetAddrReg = call->GetSingleTempReg();
// For fast tailcalls we have already loaded the call target when processing the call node.
if (!call->IsFastTailCall())
{
GetEmitter()->emitIns_R_R(ins_Load(TYP_I_IMPL), emitActualTypeSize(TYP_I_IMPL), targetAddrReg,
callThroughIndirReg);
}
else
{
                // The register in which we save the call address must not be overridden by the epilog.
assert((targetAddrReg & (RBM_INT_CALLEE_TRASH & ~RBM_LR)) == targetAddrReg);
}
// We have now generated code loading the target address from the indirection cell into `targetAddrReg`.
// We just need to emit "bl targetAddrReg" in this case.
//
assert(genIsValidIntReg(targetAddrReg));
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
nullptr, // addr
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
targetAddrReg,
call->IsFastTailCall());
// clang-format on
}
else
{
// Generate a direct call to a non-virtual user defined or helper method
assert(call->gtCallType == CT_HELPER || call->gtCallType == CT_USER_FUNC);
void* addr = nullptr;
#ifdef FEATURE_READYTORUN
if (call->gtEntryPoint.addr != NULL)
{
assert(call->gtEntryPoint.accessType == IAT_VALUE);
addr = call->gtEntryPoint.addr;
}
else
#endif // FEATURE_READYTORUN
if (call->gtCallType == CT_HELPER)
{
CorInfoHelpFunc helperNum = compiler->eeGetHelperNum(methHnd);
noway_assert(helperNum != CORINFO_HELP_UNDEF);
void* pAddr = nullptr;
addr = compiler->compGetHelperFtn(helperNum, (void**)&pAddr);
assert(pAddr == nullptr);
}
else
{
// Direct call to a non-virtual user function.
addr = call->gtDirectCallAddress;
}
assert(addr != nullptr);
// Non-virtual direct call to known addresses
#ifdef TARGET_ARM
if (!validImmForBL((ssize_t)addr))
{
regNumber tmpReg = call->GetSingleTempReg();
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, tmpReg, (ssize_t)addr);
// clang-format off
genEmitCall(emitter::EC_INDIR_R,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
NULL,
retSize,
di,
tmpReg,
call->IsFastTailCall());
// clang-format on
}
else
#endif // TARGET_ARM
{
// clang-format off
genEmitCall(emitter::EC_FUNC_TOKEN,
methHnd,
INDEBUG_LDISASM_COMMA(sigInfo)
addr,
retSize
MULTIREG_HAS_SECOND_GC_RET_ONLY_ARG(secondRetSize),
di,
REG_NA,
call->IsFastTailCall());
// clang-format on
}
}
}
}
// Produce code for a GT_JMP node.
// The arguments of the caller need to be transferred to the callee before exiting the caller.
// The actual jump to the callee is generated as part of the caller's epilog sequence.
// Therefore the codegen for GT_JMP only needs to ensure that the callee's arguments are correctly set up.
void CodeGen::genJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
assert(compiler->compJmpOpUsed);
// If no arguments, nothing to do
if (compiler->info.compArgsCount == 0)
{
return;
}
// Make sure register arguments are in their initial registers
// and stack arguments are put back as well.
unsigned varNum;
LclVarDsc* varDsc;
    // First move any enregistered stack arguments back to the stack.
    // At the same time, any reg arg that is not in its correct register is moved back to its stack location.
    //
    // We are not strictly required to spill reg args that are not in the desired reg for a jmp call,
    // but avoiding the spill would require us to deal with circularity while moving values around. Spilling
    // to the stack keeps the implementation simple, which is not a bad trade-off given that jmp calls
    // are not frequent.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
if (varDsc->lvIsRegArg && (varDsc->GetRegNum() != REG_STK))
{
            // Skip reg args that are already in the right register for the jmp call.
            // Otherwise, we will spill such args to their stack locations.
//
// If we need to generate a tail call profiler hook, then spill all
// arg regs to free them up for the callback.
if (!compiler->compIsProfilerHookNeeded() && (varDsc->GetRegNum() == varDsc->GetArgReg()))
continue;
}
else if (varDsc->GetRegNum() == REG_STK)
{
            // Skip args that are currently living on the stack.
continue;
}
// If we came here it means either a reg argument not in the right register or
// a stack argument currently living in a register. In either case the following
// assert should hold.
assert(varDsc->GetRegNum() != REG_STK);
assert(varDsc->IsEnregisterableLcl());
var_types storeType = varDsc->GetStackSlotHomeType();
emitAttr storeSize = emitActualTypeSize(storeType);
#ifdef TARGET_ARM
if (varDsc->TypeGet() == TYP_LONG)
{
// long - at least the low half must be enregistered
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetRegNum(), varNum, 0);
// Is the upper half also enregistered?
if (varDsc->GetOtherReg() != REG_STK)
{
GetEmitter()->emitIns_S_R(INS_str, EA_4BYTE, varDsc->GetOtherReg(), varNum, sizeof(int));
}
}
else
#endif // TARGET_ARM
{
GetEmitter()->emitIns_S_R(ins_Store(storeType), storeSize, varDsc->GetRegNum(), varNum, 0);
}
// Update lvRegNum life and GC info to indicate lvRegNum is dead and varDsc stack slot is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be expecting it.
// Therefore manually update life of varDsc->GetRegNum().
regMaskTP tempMask = genRegMask(varDsc->GetRegNum());
regSet.RemoveMaskVars(tempMask);
gcInfo.gcMarkRegSetNpt(tempMask);
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::AddElemD(compiler, gcInfo.gcVarPtrSetCur, varNum);
}
}
#ifdef PROFILING_SUPPORTED
// At this point all arg regs are free.
// Emit tail call profiler callback.
genProfilingLeaveCallback(CORINFO_HELP_PROF_FCN_TAILCALL);
#endif
    // Next, move any register arguments that are not currently in their argument registers back into them.
regMaskTP fixedIntArgMask = RBM_NONE; // tracks the int arg regs occupying fixed args in case of a vararg method.
unsigned firstArgVarNum = BAD_VAR_NUM; // varNum of the first argument in case of a vararg method.
for (varNum = 0; varNum < compiler->info.compArgsCount; varNum++)
{
varDsc = compiler->lvaGetDesc(varNum);
if (varDsc->lvPromoted)
{
noway_assert(varDsc->lvFieldCnt == 1); // We only handle one field here
unsigned fieldVarNum = varDsc->lvFieldLclStart;
varDsc = compiler->lvaGetDesc(fieldVarNum);
}
noway_assert(varDsc->lvIsParam);
// Skip if arg not passed in a register.
if (!varDsc->lvIsRegArg)
continue;
// Register argument
noway_assert(isRegParamType(genActualType(varDsc->TypeGet())));
// Is register argument already in the right register?
// If not load it from its stack location.
regNumber argReg = varDsc->GetArgReg(); // incoming arg register
regNumber argRegNext = REG_NA;
#ifdef TARGET_ARM64
if (varDsc->GetRegNum() != argReg)
{
var_types loadType = TYP_UNDEF;
if (varDsc->lvIsHfaRegArg())
{
// Note that for HFA, the argument is currently marked address exposed so lvRegNum will always be
// REG_STK. We home the incoming HFA argument registers in the prolog. Then we'll load them back
// here, whether they are already in the correct registers or not. This is such a corner case that
// it is not worth optimizing it.
assert(!compiler->info.compIsVarArgs);
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned cSlots = varDsc->lvHfaSlots();
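                // Reload each HFA element from the local's stack home into its consecutive
                // floating-point argument register.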
for (unsigned ofs = 0, cSlot = 0; cSlot < cSlots; cSlot++, ofs += (unsigned)loadSize)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
assert(genIsValidFloatReg(fieldReg)); // No GC register tracking for floating point registers.
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else
{
if (varTypeIsStruct(varDsc))
{
// Must be <= 16 bytes or else it wouldn't be passed in registers, except for HFA,
// which can be bigger (and is handled above).
noway_assert(EA_SIZE_IN_BYTES(varDsc->lvSize()) <= 16);
loadType = varDsc->GetLayout()->GetGCPtrType(0);
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
}
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argReg, varNum, 0);
// Update argReg life and GC Info to indicate varDsc stack slot is dead and argReg is going live.
// Note that we cannot modify varDsc->GetRegNum() here because another basic block may not be
// expecting it. Therefore manually update life of argReg. Note that GT_JMP marks the end of
                // the basic block, after which reg life and gc info will be recomputed for the new block
// in genCodeForBBList().
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
// Restore the second register.
argRegNext = genRegArgNext(argReg);
loadType = varDsc->GetLayout()->GetGCPtrType(1);
loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
regSet.AddMaskVars(genRegMask(argRegNext));
gcInfo.gcMarkRegPtrVal(argRegNext, loadType);
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
}
}
if (compiler->info.compIsVarArgs)
{
// In case of a jmp call to a vararg method ensure only integer registers are passed.
assert((genRegMask(argReg) & (RBM_ARG_REGS | RBM_ARG_RET_BUFF)) != RBM_NONE);
assert(!varDsc->lvIsHfaRegArg());
fixedIntArgMask |= genRegMask(argReg);
if (compiler->lvaIsMultiregStruct(varDsc, compiler->info.compIsVarArgs))
{
assert(argRegNext != REG_NA);
fixedIntArgMask |= genRegMask(argRegNext);
}
if (argReg == REG_ARG_0)
{
assert(firstArgVarNum == BAD_VAR_NUM);
firstArgVarNum = varNum;
}
}
#else // !TARGET_ARM64
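        // On arm, a long (and, for varargs or soft-FP, a double) occupies a pair of consecutive integer registers.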
bool twoParts = false;
var_types loadType = TYP_UNDEF;
if (varDsc->TypeGet() == TYP_LONG)
{
twoParts = true;
}
else if (varDsc->TypeGet() == TYP_DOUBLE)
{
if (compiler->info.compIsVarArgs || compiler->opts.compUseSoftFP)
{
twoParts = true;
}
}
if (twoParts)
{
argRegNext = genRegArgNext(argReg);
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, varNum, 0);
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argRegNext, varNum, REGSIZE_BYTES);
}
if (compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
fixedIntArgMask |= genRegMask(argRegNext);
}
}
else if (varDsc->lvIsHfaRegArg())
{
loadType = varDsc->GetHfaType();
regNumber fieldReg = argReg;
emitAttr loadSize = emitActualTypeSize(loadType);
unsigned maxSize = min(varDsc->lvSize(), (LAST_FP_ARGREG + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += (unsigned)loadSize)
{
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, fieldReg, varNum, ofs);
}
assert(genIsValidFloatReg(fieldReg)); // we don't use register tracking for FP
fieldReg = regNextOfType(fieldReg, loadType);
}
}
else if (varTypeIsStruct(varDsc))
{
regNumber slotReg = argReg;
unsigned maxSize = min(varDsc->lvSize(), (REG_ARG_LAST + 1 - argReg) * REGSIZE_BYTES);
for (unsigned ofs = 0; ofs < maxSize; ofs += REGSIZE_BYTES)
{
unsigned idx = ofs / REGSIZE_BYTES;
loadType = varDsc->GetLayout()->GetGCPtrType(idx);
if (varDsc->GetRegNum() != argReg)
{
emitAttr loadSize = emitActualTypeSize(loadType);
GetEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, slotReg, varNum, ofs);
}
regSet.AddMaskVars(genRegMask(slotReg));
gcInfo.gcMarkRegPtrVal(slotReg, loadType);
if (genIsValidIntReg(slotReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(slotReg);
}
slotReg = genRegArgNext(slotReg);
}
}
else
{
loadType = compiler->mangleVarArgsType(genActualType(varDsc->TypeGet()));
if (varDsc->GetRegNum() != argReg)
{
GetEmitter()->emitIns_R_S(ins_Load(loadType), emitTypeSize(loadType), argReg, varNum, 0);
}
regSet.AddMaskVars(genRegMask(argReg));
gcInfo.gcMarkRegPtrVal(argReg, loadType);
if (genIsValidIntReg(argReg) && compiler->info.compIsVarArgs)
{
fixedIntArgMask |= genRegMask(argReg);
}
}
if (compiler->lvaIsGCTracked(varDsc))
{
VarSetOps::RemoveElemD(compiler, gcInfo.gcVarPtrSetCur, varDsc->lvVarIndex);
}
#endif // !TARGET_ARM64
}
    // Jmp call to a vararg method - if the method has fewer fixed arguments than there are integer arg registers,
    // load the remaining integer arg registers from the corresponding
    // shadow stack slots. This is because we don't know the number and type
    // of non-fixed params passed by the caller; therefore we have to assume the worst case,
    // in which the caller passed register-sized values in all integer arg registers.
//
    // The caller could have passed gc-ref/byref type var args. Since these are var args,
    // the callee has no way of knowing their gc-ness. Therefore, mark the region that loads
    // the remaining arg registers from shadow stack slots as non-gc interruptible.
if (fixedIntArgMask != RBM_NONE)
{
assert(compiler->info.compIsVarArgs);
assert(firstArgVarNum != BAD_VAR_NUM);
regMaskTP remainingIntArgMask = RBM_ARG_REGS & ~fixedIntArgMask;
if (remainingIntArgMask != RBM_NONE)
{
GetEmitter()->emitDisableGC();
for (int argNum = 0, argOffset = 0; argNum < MAX_REG_ARG; ++argNum)
{
regNumber argReg = intArgRegs[argNum];
regMaskTP argRegMask = genRegMask(argReg);
if ((remainingIntArgMask & argRegMask) != 0)
{
remainingIntArgMask &= ~argRegMask;
GetEmitter()->emitIns_R_S(INS_ldr, EA_PTRSIZE, argReg, firstArgVarNum, argOffset);
}
argOffset += REGSIZE_BYTES;
}
GetEmitter()->emitEnableGC();
}
}
}
//------------------------------------------------------------------------
// genIntCastOverflowCheck: Generate overflow checking code for an integer cast.
//
// Arguments:
// cast - The GT_CAST node
// desc - The cast description
// reg - The register containing the value to check
//
void CodeGen::genIntCastOverflowCheck(GenTreeCast* cast, const GenIntCastDesc& desc, regNumber reg)
{
switch (desc.CheckKind())
{
case GenIntCastDesc::CHECK_POSITIVE:
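            // A negative source value overflows an unsigned target: compare against zero and
            // branch to the throw block on a signed 'less than'.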
GetEmitter()->emitIns_R_I(INS_cmp, EA_ATTR(desc.CheckSrcSize()), reg, 0);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::CHECK_UINT_RANGE:
// We need to check if the value is not greater than 0xFFFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 32 bits are zero.
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF00000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_POSITIVE_INT_RANGE:
// We need to check if the value is not greater than 0x7FFFFFFF but this value
// cannot be encoded in the immediate operand of CMP. Use TST instead to check
// if the upper 33 bits are zero.
GetEmitter()->emitIns_R_I(INS_tst, EA_8BYTE, reg, 0xFFFFFFFF80000000LL);
genJumpToThrowHlpBlk(EJ_ne, SCK_OVERFLOW);
break;
case GenIntCastDesc::CHECK_INT_RANGE:
{
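            // INT32_MAX/INT32_MIN cannot be encoded as CMP immediates, so materialize each bound
            // in a temp register before comparing.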
const regNumber tempReg = cast->GetSingleTempReg();
assert(tempReg != reg);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MAX);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_gt, SCK_OVERFLOW);
instGen_Set_Reg_To_Imm(EA_8BYTE, tempReg, INT32_MIN);
GetEmitter()->emitIns_R_R(INS_cmp, EA_8BYTE, reg, tempReg);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
break;
#endif
default:
{
assert(desc.CheckKind() == GenIntCastDesc::CHECK_SMALL_INT_RANGE);
const int castMaxValue = desc.CheckSmallIntMax();
const int castMinValue = desc.CheckSmallIntMin();
// Values greater than 255 cannot be encoded in the immediate operand of CMP.
// Replace (x > max) with (x >= max + 1) where max + 1 (a power of 2) can be
// encoded. We could do this for all max values but on ARM32 "cmp r0, 255"
// is better than "cmp r0, 256" because it has a shorter encoding.
if (castMaxValue > 255)
{
assert((castMaxValue == 32767) || (castMaxValue == 65535));
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue + 1);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hs : EJ_ge, SCK_OVERFLOW);
}
else
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMaxValue);
genJumpToThrowHlpBlk((castMinValue == 0) ? EJ_hi : EJ_gt, SCK_OVERFLOW);
}
if (castMinValue != 0)
{
GetEmitter()->emitIns_R_I(INS_cmp, EA_SIZE(desc.CheckSrcSize()), reg, castMinValue);
genJumpToThrowHlpBlk(EJ_lt, SCK_OVERFLOW);
}
}
break;
}
}
//------------------------------------------------------------------------
// genIntToIntCast: Generate code for an integer cast, with or without overflow check.
//
// Arguments:
// cast - The GT_CAST node
//
// Assumptions:
// The cast node is not a contained node and must have an assigned register.
// Neither the source nor target type can be a floating point type.
//
// TODO-ARM64-CQ: Allow castOp to be a contained node without an assigned register.
//
void CodeGen::genIntToIntCast(GenTreeCast* cast)
{
genConsumeRegs(cast->gtGetOp1());
const regNumber srcReg = cast->gtGetOp1()->GetRegNum();
const regNumber dstReg = cast->GetRegNum();
assert(genIsValidIntReg(srcReg));
assert(genIsValidIntReg(dstReg));
GenIntCastDesc desc(cast);
if (desc.CheckKind() != GenIntCastDesc::CHECK_NONE)
{
genIntCastOverflowCheck(cast, desc, srcReg);
}
if ((desc.ExtendKind() != GenIntCastDesc::COPY) || (srcReg != dstReg))
{
instruction ins;
unsigned insSize;
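        // insSize is the operand size in bytes and is passed to EA_ATTR below.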
switch (desc.ExtendKind())
{
case GenIntCastDesc::ZERO_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_uxtb : INS_uxth;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_SMALL_INT:
ins = (desc.ExtendSrcSize() == 1) ? INS_sxtb : INS_sxth;
insSize = 4;
break;
#ifdef TARGET_64BIT
case GenIntCastDesc::ZERO_EXTEND_INT:
ins = INS_mov;
insSize = 4;
break;
case GenIntCastDesc::SIGN_EXTEND_INT:
ins = INS_sxtw;
insSize = 8;
break;
#endif
default:
assert(desc.ExtendKind() == GenIntCastDesc::COPY);
ins = INS_mov;
insSize = desc.ExtendSrcSize();
break;
}
GetEmitter()->emitIns_Mov(ins, EA_ATTR(insSize), dstReg, srcReg, /* canSkip */ false);
}
genProduceReg(cast);
}
//------------------------------------------------------------------------
// genFloatToFloatCast: Generate code for a cast between float and double
//
// Arguments:
// treeNode - The GT_CAST node
//
// Return Value:
// None.
//
// Assumptions:
// Cast is a non-overflow conversion.
// The treeNode must have an assigned register.
// The cast is between float and double.
//
void CodeGen::genFloatToFloatCast(GenTree* treeNode)
{
// float <--> double conversions are always non-overflow ones
assert(treeNode->OperGet() == GT_CAST);
assert(!treeNode->gtOverflow());
regNumber targetReg = treeNode->GetRegNum();
assert(genIsValidFloatReg(targetReg));
GenTree* op1 = treeNode->AsOp()->gtOp1;
assert(!op1->isContained()); // Cannot be contained
assert(genIsValidFloatReg(op1->GetRegNum())); // Must be a valid float reg.
var_types dstType = treeNode->CastToType();
var_types srcType = op1->TypeGet();
assert(varTypeIsFloating(srcType) && varTypeIsFloating(dstType));
genConsumeOperands(treeNode->AsOp());
// treeNode must be a reg
assert(!treeNode->isContained());
#if defined(TARGET_ARM)
if (srcType != dstType)
{
instruction insVcvt = (srcType == TYP_FLOAT) ? INS_vcvt_f2d // convert Float to Double
: INS_vcvt_d2f; // convert Double to Float
GetEmitter()->emitIns_R_R(insVcvt, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum());
}
else
{
GetEmitter()->emitIns_Mov(INS_vmov, emitTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#elif defined(TARGET_ARM64)
if (srcType != dstType)
{
insOpts cvtOption = (srcType == TYP_FLOAT) ? INS_OPTS_S_TO_D // convert Single to Double
: INS_OPTS_D_TO_S; // convert Double to Single
GetEmitter()->emitIns_R_R(INS_fcvt, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
cvtOption);
}
else
{
// If double to double cast or float to float cast. Emit a move instruction.
GetEmitter()->emitIns_Mov(INS_mov, emitActualTypeSize(treeNode), treeNode->GetRegNum(), op1->GetRegNum(),
/* canSkip */ true);
}
#endif // TARGET*
genProduceReg(treeNode);
}
//------------------------------------------------------------------------
// genCreateAndStoreGCInfo: Create and record GC Info for the function.
//
void CodeGen::genCreateAndStoreGCInfo(unsigned codeSize,
unsigned prologSize,
unsigned epilogSize DEBUGARG(void* codePtr))
{
IAllocator* allowZeroAlloc = new (compiler, CMK_GC) CompIAllocator(compiler->getAllocatorGC());
GcInfoEncoder* gcInfoEncoder = new (compiler, CMK_GC)
GcInfoEncoder(compiler->info.compCompHnd, compiler->info.compMethodInfo, allowZeroAlloc, NOMEM);
assert(gcInfoEncoder != nullptr);
// Follow the code pattern of the x86 gc info encoder (genCreateAndStoreGCInfoJIT32).
gcInfo.gcInfoBlockHdrSave(gcInfoEncoder, codeSize, prologSize);
// We keep the call count for the second call to gcMakeRegPtrTable() below.
unsigned callCnt = 0;
// First we figure out the encoder ID's for the stack slots and registers.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_ASSIGN_SLOTS, &callCnt);
// Now we've requested all the slots we'll need; "finalize" these (make more compact data structures for them).
gcInfoEncoder->FinalizeSlotIds();
// Now we can actually use those slot ID's to declare live ranges.
gcInfo.gcMakeRegPtrTable(gcInfoEncoder, codeSize, prologSize, GCInfo::MAKE_REG_PTR_MODE_DO_WORK, &callCnt);
#ifdef TARGET_ARM64
if (compiler->opts.compDbgEnC)
{
// what we have to preserve is called the "frame header" (see comments in VM\eetwain.cpp)
// which is:
// -return address
// -saved off RBP
// -saved 'this' pointer and bool for synchronized methods
// 4 slots for RBP + return address + RSI + RDI
int preservedAreaSize = 4 * REGSIZE_BYTES;
if (compiler->info.compFlags & CORINFO_FLG_SYNCH)
{
if (!(compiler->info.compFlags & CORINFO_FLG_STATIC))
preservedAreaSize += REGSIZE_BYTES;
preservedAreaSize += 1; // bool for synchronized methods
}
// Used to signal both that the method is compiled for EnC, and also the size of the block at the top of the
// frame
gcInfoEncoder->SetSizeOfEditAndContinuePreservedArea(preservedAreaSize);
}
#endif // TARGET_ARM64
if (compiler->opts.IsReversePInvoke())
{
unsigned reversePInvokeFrameVarNumber = compiler->lvaReversePInvokeFrameVar;
assert(reversePInvokeFrameVarNumber != BAD_VAR_NUM);
const LclVarDsc* reversePInvokeFrameVar = compiler->lvaGetDesc(reversePInvokeFrameVarNumber);
gcInfoEncoder->SetReversePInvokeFrameSlot(reversePInvokeFrameVar->GetStackOffset());
}
gcInfoEncoder->Build();
// GC Encoder automatically puts the GC info in the right spot using ICorJitInfo::allocGCInfo(size_t)
// let's save the values anyway for debugging purposes
compiler->compInfoBlkAddr = gcInfoEncoder->Emit();
compiler->compInfoBlkSize = 0; // not exposed by the GCEncoder interface
}
// clang-format off
const CodeGen::GenConditionDesc CodeGen::GenConditionDesc::map[32]
{
{ }, // NONE
{ }, // 1
{ EJ_lt }, // SLT
{ EJ_le }, // SLE
{ EJ_ge }, // SGE
{ EJ_gt }, // SGT
{ EJ_mi }, // S
{ EJ_pl }, // NS
{ EJ_eq }, // EQ
{ EJ_ne }, // NE
{ EJ_lo }, // ULT
{ EJ_ls }, // ULE
{ EJ_hs }, // UGE
{ EJ_hi }, // UGT
{ EJ_hs }, // C
{ EJ_lo }, // NC
{ EJ_eq }, // FEQ
{ EJ_gt, GT_AND, EJ_lo }, // FNE
{ EJ_lo }, // FLT
{ EJ_ls }, // FLE
{ EJ_ge }, // FGE
{ EJ_gt }, // FGT
{ EJ_vs }, // O
{ EJ_vc }, // NO
{ EJ_eq, GT_OR, EJ_vs }, // FEQU
{ EJ_ne }, // FNEU
{ EJ_lt }, // FLTU
{ EJ_le }, // FLEU
{ EJ_hs }, // FGEU
{ EJ_hi }, // FGTU
{ }, // P
{ }, // NP
};
// clang-format on
//------------------------------------------------------------------------
// inst_SETCC: Generate code to set a register to 0 or 1 based on a condition.
//
// Arguments:
// condition - The condition
// type - The type of the value to be produced
// dstReg - The destination register to be set to 1 or 0
//
void CodeGen::inst_SETCC(GenCondition condition, var_types type, regNumber dstReg)
{
assert(varTypeIsIntegral(type));
assert(genIsValidIntReg(dstReg));
#ifdef TARGET_ARM64
const GenConditionDesc& desc = GenConditionDesc::Get(condition);
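    // Set dstReg from the first condition; compound (two-part) conditions, used for some
    // floating-point relations, require a second conditional set guarded by a branch.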
inst_SET(desc.jumpKind1, dstReg);
if (desc.oper != GT_NONE)
{
BasicBlock* labelNext = genCreateTempLabel();
inst_JMP((desc.oper == GT_OR) ? desc.jumpKind1 : emitter::emitReverseJumpKind(desc.jumpKind1), labelNext);
inst_SET(desc.jumpKind2, dstReg);
genDefineTempLabel(labelNext);
}
#else
    // Emit code like this:
// ...
// bgt True
// movs rD, #0
// b Next
// True:
// movs rD, #1
// Next:
// ...
BasicBlock* labelTrue = genCreateTempLabel();
inst_JCC(condition, labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 0);
BasicBlock* labelNext = genCreateTempLabel();
GetEmitter()->emitIns_J(INS_b, labelNext);
genDefineTempLabel(labelTrue);
GetEmitter()->emitIns_R_I(INS_mov, emitActualTypeSize(type), dstReg, 1);
genDefineTempLabel(labelNext);
#endif
}
//------------------------------------------------------------------------
// genCodeForStoreBlk: Produce code for a GT_STORE_OBJ/GT_STORE_DYN_BLK/GT_STORE_BLK node.
//
// Arguments:
//    blkOp - the node
//
void CodeGen::genCodeForStoreBlk(GenTreeBlk* blkOp)
{
assert(blkOp->OperIs(GT_STORE_OBJ, GT_STORE_DYN_BLK, GT_STORE_BLK));
if (blkOp->OperIs(GT_STORE_OBJ))
{
assert(!blkOp->gtBlkOpGcUnsafe);
assert(blkOp->OperIsCopyBlkOp());
assert(blkOp->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(blkOp->AsObj());
return;
}
bool isCopyBlk = blkOp->OperIsCopyBlkOp();
switch (blkOp->gtBlkOpKind)
{
case GenTreeBlk::BlkOpKindHelper:
assert(!blkOp->gtBlkOpGcUnsafe);
if (isCopyBlk)
{
genCodeForCpBlkHelper(blkOp);
}
else
{
genCodeForInitBlkHelper(blkOp);
}
break;
case GenTreeBlk::BlkOpKindUnroll:
if (isCopyBlk)
{
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitDisableGC();
}
genCodeForCpBlkUnroll(blkOp);
if (blkOp->gtBlkOpGcUnsafe)
{
GetEmitter()->emitEnableGC();
}
}
else
{
assert(!blkOp->gtBlkOpGcUnsafe);
genCodeForInitBlkUnroll(blkOp);
}
break;
default:
unreached();
}
}
//------------------------------------------------------------------------
// genScaledAdd: A helper for genLeaInstruction.
//
void CodeGen::genScaledAdd(emitAttr attr, regNumber targetReg, regNumber baseReg, regNumber indexReg, int scale)
{
emitter* emit = GetEmitter();
if (scale == 0)
{
// target = base + index
GetEmitter()->emitIns_R_R_R(INS_add, attr, targetReg, baseReg, indexReg);
}
else
{
// target = base + index<<scale
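        // e.g. on arm64 this emits: add <target>, <base>, <index>, LSL #<scale>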
#if defined(TARGET_ARM)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_FLAGS_DONT_CARE, INS_OPTS_LSL);
#elif defined(TARGET_ARM64)
emit->emitIns_R_R_R_I(INS_add, attr, targetReg, baseReg, indexReg, scale, INS_OPTS_LSL);
#endif
}
}
//------------------------------------------------------------------------
// genCodeForMulLong: Generates code for int*int->long multiplication.
//
// Arguments:
// mul - the GT_MUL_LONG node
//
// Return Value:
// None.
//
void CodeGen::genCodeForMulLong(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL_LONG));
genConsumeOperands(mul);
regNumber srcReg1 = mul->gtGetOp1()->GetRegNum();
regNumber srcReg2 = mul->gtGetOp2()->GetRegNum();
instruction ins = mul->IsUnsigned() ? INS_umull : INS_smull;
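    // On arm the 64-bit product is written to a register pair (GetRegNum/gtOtherReg); on arm64 a single
    // register holds the full 64-bit result.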
#ifdef TARGET_ARM
GetEmitter()->emitIns_R_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), mul->AsMultiRegOp()->gtOtherReg, srcReg1, srcReg2);
#else
GetEmitter()->emitIns_R_R_R(ins, EA_4BYTE, mul->GetRegNum(), srcReg1, srcReg2);
#endif
genProduceReg(mul);
}
//------------------------------------------------------------------------
// genLeaInstruction: Produce code for a GT_LEA node.
//
// Arguments:
// lea - the node
//
void CodeGen::genLeaInstruction(GenTreeAddrMode* lea)
{
genConsumeOperands(lea);
emitter* emit = GetEmitter();
emitAttr size = emitTypeSize(lea);
int offset = lea->Offset();
    // On ARM we can only load addresses of the form:
//
// [Base + index*scale]
// [Base + Offset]
// [Literal] (PC-Relative)
//
// So for the case of a LEA node of the form [Base + Index*Scale + Offset] we will generate:
// destReg = baseReg + indexReg * scale;
// destReg = destReg + offset;
//
// TODO-ARM64-CQ: The purpose of the GT_LEA node is to directly reflect a single target architecture
// addressing mode instruction. Currently we're 'cheating' by producing one or more
// instructions to generate the addressing mode so we need to modify lowering to
// produce LEAs that are a 1:1 relationship to the ARM64 architecture.
if (lea->Base() && lea->Index())
{
GenTree* memBase = lea->Base();
GenTree* index = lea->Index();
DWORD scale;
assert(isPow2(lea->gtScale));
BitScanForward(&scale, lea->gtScale);
assert(scale <= 4);
if (offset != 0)
{
regNumber tmpReg = lea->GetSingleTempReg();
// When generating fully interruptible code we have to use the "large offset" sequence
            // when calculating an EA_BYREF, as we can't report a byref that points outside of the object
//
bool useLargeOffsetSeq = compiler->GetInterruptible() && (size == EA_BYREF);
if (!useLargeOffsetSeq && emitter::emitIns_valid_imm_for_add(offset))
{
// Generate code to set tmpReg = base + index*scale
genScaledAdd(size, tmpReg, memBase->GetRegNum(), index->GetRegNum(), scale);
// Then compute target reg from [tmpReg + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), tmpReg, offset);
}
else // large offset sequence
{
noway_assert(tmpReg != index->GetRegNum());
noway_assert(tmpReg != memBase->GetRegNum());
                // First load tmpReg with the offset constant
// rTmp = imm
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then add the scaled index register
// rTmp = rTmp + index*scale
genScaledAdd(EA_PTRSIZE, tmpReg, tmpReg, index->GetRegNum(), scale);
                // Then compute target reg from [base + tmpReg]
// rDst = base + rTmp
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else
{
// Then compute target reg from [base + index*scale]
genScaledAdd(size, lea->GetRegNum(), memBase->GetRegNum(), index->GetRegNum(), scale);
}
}
else if (lea->Base())
{
GenTree* memBase = lea->Base();
if (emitter::emitIns_valid_imm_for_add(offset))
{
if (offset != 0)
{
// Then compute target reg from [memBase + offset]
emit->emitIns_R_R_I(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), offset);
}
else // offset is zero
{
emit->emitIns_Mov(INS_mov, size, lea->GetRegNum(), memBase->GetRegNum(), /* canSkip */ true);
}
}
else
{
// We require a tmpReg to hold the offset
regNumber tmpReg = lea->GetSingleTempReg();
// First load tmpReg with the large offset constant
instGen_Set_Reg_To_Imm(EA_PTRSIZE, tmpReg, offset);
// Then compute target reg from [memBase + tmpReg]
emit->emitIns_R_R_R(INS_add, size, lea->GetRegNum(), memBase->GetRegNum(), tmpReg);
}
}
else if (lea->Index())
{
// If we encounter a GT_LEA node without a base it means it came out
// when attempting to optimize an arbitrary arithmetic expression during lower.
// This is currently disabled in ARM64 since we need to adjust lower to account
// for the simpler instructions ARM64 supports.
// TODO-ARM64-CQ: Fix this and let LEA optimize arithmetic trees too.
assert(!"We shouldn't see a baseless address computation during CodeGen for ARM64");
}
genProduceReg(lea);
}
#ifdef FEATURE_SIMD
//------------------------------------------------------------------------
// genSIMDSplitReturn: Generates code for returning a fixed-size SIMD type that lives
// in a single register, but is returned in multiple registers.
//
// Arguments:
// src - The source of the return
// retTypeDesc - The return type descriptor.
//
void CodeGen::genSIMDSplitReturn(GenTree* src, ReturnTypeDesc* retTypeDesc)
{
assert(varTypeIsSIMD(src));
assert(src->isUsedFromReg());
regNumber srcReg = src->GetRegNum();
    // Treat src register as a homogeneous vector with element size equal to the reg size
// Insert pieces in order
unsigned regCount = retTypeDesc->GetReturnRegCount();
for (unsigned i = 0; i < regCount; ++i)
{
var_types type = retTypeDesc->GetReturnRegType(i);
regNumber reg = retTypeDesc->GetABIReturnReg(i);
if (varTypeIsFloating(type))
{
// If the register piece is to be passed in a floating point register
// Use a vector mov element instruction
// reg is not a vector, so it is in the first element reg[0]
// mov reg[0], src[i]
// This effectively moves from `src[i]` to `reg[0]`, upper bits of reg remain unchanged
// For the case where src == reg, since we are only writing reg[0], as long as we iterate
// so that src[0] is consumed before writing reg[0], we do not need a temporary.
GetEmitter()->emitIns_R_R_I_I(INS_mov, emitTypeSize(type), reg, srcReg, 0, i);
}
else
{
// If the register piece is to be passed in an integer register
// Use a vector mov to general purpose register instruction
// mov reg, src[i]
// This effectively moves from `src[i]` to `reg`
GetEmitter()->emitIns_R_R_I(INS_mov, emitTypeSize(type), reg, srcReg, i);
}
}
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// genPushCalleeSavedRegisters: Push any callee-saved registers we have used.
//
// Arguments (arm64):
// initReg - A scratch register (that gets set to zero on some platforms).
// pInitRegZeroed - OUT parameter. *pInitRegZeroed is set to 'true' if this method sets initReg register to zero,
// 'false' if initReg was set to a non-zero value, and left unchanged if initReg was not touched.
//
#if defined(TARGET_ARM64)
void CodeGen::genPushCalleeSavedRegisters(regNumber initReg, bool* pInitRegZeroed)
#else
void CodeGen::genPushCalleeSavedRegisters()
#endif
{
assert(compiler->compGeneratingProlog);
#ifdef TARGET_ARM64
// Probe large frames now, if necessary, since genPushCalleeSavedRegisters() will allocate the frame. Note that
// for arm64, genAllocLclFrame only probes the frame; it does not actually allocate it (it does not change SP).
// For arm64, we are probing the frame before the callee-saved registers are saved. The 'initReg' might have
// been calculated to be one of the callee-saved registers (say, if all the integer argument registers are
// in use, and perhaps with other conditions being satisfied). This is ok in other cases, after the callee-saved
// registers have been saved. So instead of letting genAllocLclFrame use initReg as a temporary register,
// always use REG_SCRATCH. We don't care if it trashes it, so ignore the initRegZeroed output argument.
bool ignoreInitRegZeroed = false;
genAllocLclFrame(compiler->compLclFrameSize, REG_SCRATCH, &ignoreInitRegZeroed,
intRegState.rsCalleeRegArgMaskLiveIn);
#endif
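    // Start from the callee-saved registers that this method actually modified; FP/LR are added below as needed.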
regMaskTP rsPushRegs = regSet.rsGetModifiedRegsMask() & RBM_CALLEE_SAVED;
#if ETW_EBP_FRAMED
if (!isFramePointerUsed() && regSet.rsRegsModified(RBM_FPBASE))
{
noway_assert(!"Used register RBM_FPBASE as a scratch register!");
}
#endif
// On ARM we push the FP (frame-pointer) here along with all other callee saved registers
if (isFramePointerUsed())
rsPushRegs |= RBM_FPBASE;
//
    // It may be possible to skip pushing/popping lr for leaf methods. However, such an optimization would require
    // changes in the GC suspension architecture.
//
// We would need to guarantee that a tight loop calling a virtual leaf method can be suspended for GC. Today, we
// generate partially interruptible code for both the method that contains the tight loop with the call and the leaf
// method. GC suspension depends on return address hijacking in this case. Return address hijacking depends
// on the return address to be saved on the stack. If we skipped pushing/popping lr, the return address would never
// be saved on the stack and the GC suspension would time out.
//
    // So if we wanted to skip pushing/popping lr for leaf frames, we would also need to do one of
// the following to make GC suspension work in the above scenario:
// - Make return address hijacking work even when lr is not saved on the stack.
// - Generate fully interruptible code for loops that contains calls
// - Generate fully interruptible code for leaf methods
//
// Given the limited benefit from this optimization (<10k for CoreLib NGen image), the extra complexity
// is not worth it.
//
rsPushRegs |= RBM_LR; // We must save the return address (in the LR register)
regSet.rsMaskCalleeSaved = rsPushRegs;
#ifdef DEBUG
if (compiler->compCalleeRegsPushed != genCountBits(rsPushRegs))
{
printf("Error: unexpected number of callee-saved registers to push. Expected: %d. Got: %d ",
compiler->compCalleeRegsPushed, genCountBits(rsPushRegs));
dspRegMask(rsPushRegs);
printf("\n");
assert(compiler->compCalleeRegsPushed == genCountBits(rsPushRegs));
}
#endif // DEBUG
#if defined(TARGET_ARM)
regMaskTP maskPushRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskPushRegsInt = rsPushRegs & ~maskPushRegsFloat;
maskPushRegsInt |= genStackAllocRegisterMask(compiler->compLclFrameSize, maskPushRegsFloat);
assert(FitsIn<int>(maskPushRegsInt));
inst_IV(INS_push, (int)maskPushRegsInt);
compiler->unwindPushMaskInt(maskPushRegsInt);
if (maskPushRegsFloat != 0)
{
genPushFltRegs(maskPushRegsFloat);
compiler->unwindPushMaskFloat(maskPushRegsFloat);
}
#elif defined(TARGET_ARM64)
// See the document "ARM64 JIT Frame Layout" and/or "ARM64 Exception Data" for more details or requirements and
// options. Case numbers in comments here refer to this document. See also Compiler::lvaAssignFrameOffsets()
// for pictures of the general frame layouts, and CodeGen::genFuncletProlog() implementations (per architecture)
// for pictures of the funclet frame layouts.
//
// For most frames, generate, e.g.:
// stp fp, lr, [sp,-0x80]! // predecrement SP with full frame size, and store FP/LR pair.
// stp r19, r20, [sp, 0x60] // store at positive offset from SP established above, into callee-saved area
// // at top of frame (highest addresses).
// stp r21, r22, [sp, 0x70]
//
// Notes:
// 1. We don't always need to save FP. If FP isn't saved, then LR is saved with the other callee-saved registers
// at the top of the frame.
// 2. If we save FP, then the first store is FP, LR.
// 3. General-purpose registers are 8 bytes, floating-point registers are 16 bytes, but FP/SIMD registers only
// preserve their lower 8 bytes, by calling convention.
// 4. For frames with varargs, we spill the integer register arguments to the stack, so all the arguments are
// consecutive, and at the top of the frame.
// 5. We allocate the frame here; no further changes to SP are allowed (except in the body, for localloc).
//
// For functions with GS and localloc, we change the frame so the frame pointer and LR are saved at the top
// of the frame, just under the varargs registers (if any). Note that the funclet frames must follow the same
// rule, and both main frame and funclet frames (if any) must put PSPSym in the same offset from Caller-SP.
// Since this frame type is relatively rare, we force using it via stress modes, for additional coverage.
//
// The frames look like the following (simplified to only include components that matter for establishing the
// frames). See also Compiler::lvaAssignFrameOffsets().
//
// Frames with FP, LR saved at bottom of frame (above outgoing argument space):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
// Frames with FP, LR saved at top of frame (below saved varargs incoming arguments):
//
// | |
// |-----------------------|
// | incoming arguments |
// +=======================+ <---- Caller's SP
// | Varargs regs space | // Only for varargs functions; 64 bytes
// |-----------------------|
// | Saved LR | // 8 bytes
// |-----------------------|
// | Saved FP | // 8 bytes
// |-----------------------|
// |Callee saved registers | // not including FP/LR; multiple of 8 bytes
// |-----------------------|
// | PSP slot | // 8 bytes (omitted in CoreRT ABI)
// |-----------------------|
// | locals, temps, etc. |
// |-----------------------|
// | possible GS cookie |
// |-----------------------|
// | Outgoing arg space | // multiple of 8 bytes; if required (i.e., #outsz != 0)
// |-----------------------| <---- Ambient SP
// | | |
// ~ | Stack grows ~
// | | downward |
// V
//
int totalFrameSize = genTotalFrameSize();
int offset; // This will be the starting place for saving the callee-saved registers, in increasing order.
regMaskTP maskSaveRegsFloat = rsPushRegs & RBM_ALLFLOAT;
regMaskTP maskSaveRegsInt = rsPushRegs & ~maskSaveRegsFloat;
#ifdef DEBUG
if (verbose)
{
printf("Save float regs: ");
dspRegMask(maskSaveRegsFloat);
printf("\n");
printf("Save int regs: ");
dspRegMask(maskSaveRegsInt);
printf("\n");
}
#endif // DEBUG
// The frameType number is arbitrary, is defined below, and corresponds to one of the frame styles we
// generate based on various sizes.
int frameType = 0;
// The amount to subtract from SP before starting to store the callee-saved registers. It might be folded into the
// first save instruction as a "predecrement" amount, if possible.
int calleeSaveSpDelta = 0;
if (isFramePointerUsed())
{
// We need to save both FP and LR.
assert((maskSaveRegsInt & RBM_FP) != 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// If we need to generate a GS cookie, we need to make sure the saved frame pointer and return address
// (FP and LR) are protected from buffer overrun by the GS cookie. If FP/LR are at the lowest addresses,
// then they are safe, since they are lower than any unsafe buffers. And the GS cookie we add will
// protect our caller's frame. If we have a localloc, however, that is dynamically placed lower than our
// saved FP/LR. In that case, we save FP/LR along with the rest of the callee-saved registers, above
// the GS cookie.
//
// After the frame is allocated, the frame pointer is established, pointing at the saved frame pointer to
// create a frame pointer chain.
//
// Do we need another frame pointer register to get good code quality in the case of having the frame pointer
// point high in the frame, so we can take advantage of arm64's preference for positive offsets? C++ native
// code dedicates callee-saved x19 to this, so generates:
// mov x19, sp
// in the prolog, then uses x19 for local var accesses. Given that this case is so rare, we currently do
// not do this. That means that negative offsets from FP might need to use the reserved register to form
// the local variable offset for an addressing mode.
if (((compiler->lvaOutgoingArgSpaceSize == 0) && (totalFrameSize <= 504)) &&
!genSaveFpLrWithAllCalleeSavedRegisters)
{
// Case #1.
//
// Generate:
// stp fp,lr,[sp,#-framesz]!
//
// The (totalFrameSize <= 504) condition ensures that both the pre-index STP instruction
// used in the prolog, and the post-index LDP instruction used in the epilog, can be generated.
// Note that STP and the unwind codes can handle -512, but LDP with a positive post-index value
// can only handle up to 504, and we want our prolog and epilog to match.
//
// After saving callee-saved registers, we establish the frame pointer with:
// mov fp,sp
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
JITDUMP("Frame type 1. #outsz=0; #framesz=%d; LclFrameSize=%d\n", totalFrameSize,
compiler->compLclFrameSize);
frameType = 1;
assert(totalFrameSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE, -totalFrameSize,
INS_OPTS_PRE_INDEX);
compiler->unwindSaveRegPairPreindexed(REG_FP, REG_LR, -totalFrameSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
else if (totalFrameSize <= 512)
{
// Case #2.
//
// The (totalFrameSize <= 512) condition ensures the callee-saved registers can all be saved using STP
// with signed offset encoding. The maximum positive STP offset is 504, but when storing a pair of
// 8 byte registers, the largest actual offset we use would be 512 - 8 * 2 = 496. And STR with positive
// offset has a range 0 to 32760.
//
// After saving callee-saved registers, we establish the frame pointer with:
// add fp,sp,#outsz
// We do this *after* saving callee-saved registers, so the prolog/epilog unwind codes mostly match.
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 4 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 4;
// The frame will be allocated below, when the callee-saved registers are saved. This might mean a
// separate SUB instruction or the SP adjustment might be folded in to the first STP if there is
// no outgoing argument space AND no local frame space, that is, if the only thing the frame does
// is save callee-saved registers (and possibly varargs argument registers).
calleeSaveSpDelta = totalFrameSize;
offset = (int)compiler->compLclFrameSize;
}
else
{
JITDUMP("Frame type 2 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 2;
// Generate:
// sub sp,sp,#framesz
// stp fp,lr,[sp,#outsz] // note that by necessity, #outsz <= #framesz - 16, so #outsz <= 496.
assert(totalFrameSize - compiler->lvaOutgoingArgSpaceSize <= STACK_PROBE_BOUNDARY_THRESHOLD_BYTES);
GetEmitter()->emitIns_R_R_I(INS_sub, EA_PTRSIZE, REG_SPBASE, REG_SPBASE, totalFrameSize);
compiler->unwindAllocStack(totalFrameSize);
assert(compiler->lvaOutgoingArgSpaceSize + 2 * REGSIZE_BYTES <= (unsigned)totalFrameSize);
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, REG_FP, REG_LR, REG_SPBASE,
compiler->lvaOutgoingArgSpaceSize);
compiler->unwindSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize);
maskSaveRegsInt &= ~(RBM_FP | RBM_LR); // We've already saved FP/LR
offset = (int)compiler->compLclFrameSize + 2 * REGSIZE_BYTES; // 2 for FP/LR
}
}
else
{
// Case 5 or 6.
//
// First, the callee-saved registers will be saved, and the callee-saved register code must use
// pre-index to subtract from SP as the first instruction. It must also leave space for varargs
// registers to be stored. For example:
// stp r19,r20,[sp,#-96]!
// stp d8,d9,[sp,#16]
// ... save varargs incoming integer registers ...
// Note that all SP alterations must be 16-byte aligned. We have already calculated any alignment to be
// lower on the stack than the callee-saved registers (see lvaAlignFrame() for how we calculate
// alignment). So, if there is an odd number of callee-saved registers, we use (for example, with just
// one saved register):
// sub sp,sp,#16
// str r19,[sp,#8]
// This is one additional instruction, but it centralizes the aligned space. Otherwise, it might be
// possible to have two 8-byte alignment padding words, one below the callee-saved registers, and one
// above them. If that is preferable, we could implement it.
//
// Note that any varargs saved space will always be 16-byte aligned, since there are 8 argument
// registers.
//
// Then, define #remainingFrameSz = #framesz - (callee-saved size + varargs space + possible alignment
        // padding from above). Note that #remainingFrameSz must not be zero, since we still need to save FP,LR.
//
// Generate:
// sub sp,sp,#remainingFrameSz
// or, for large frames:
// mov rX, #remainingFrameSz // maybe multiple instructions
// sub sp,sp,rX
//
// followed by:
// stp fp,lr,[sp,#outsz]
// add fp,sp,#outsz
//
// However, we need to handle the case where #outsz is larger than the constant signed offset encoding
// can handle. And, once again, we might need to deal with #outsz that is not aligned to 16-bytes (i.e.,
// STACK_ALIGN). So, in the case of large #outsz we will have an additional SP adjustment, using one of
// the following sequences:
//
// Define #remainingFrameSz2 = #remainingFrameSz - #outsz.
//
// sub sp,sp,#remainingFrameSz2 // if #remainingFrameSz2 is 16-byte aligned
// stp fp,lr,[sp]
// mov fp,sp
// sub sp,sp,#outsz // in this case, #outsz must also be 16-byte aligned
//
// Or:
//
// sub sp,sp,roundUp(#remainingFrameSz2,16) // if #remainingFrameSz2 is not 16-byte aligned (it is
// // always guaranteed to be 8 byte aligned).
// stp fp,lr,[sp,#8] // it will always be #8 in the unaligned case
// add fp,sp,#8
// sub sp,sp,#outsz - #8
//
// (As usual, for a large constant "#outsz - #8", we might need multiple instructions:
// mov rX, #outsz - #8 // maybe multiple instructions
// sub sp,sp,rX
// )
//
// Note that even if we align the SP alterations, that does not imply that we are creating empty alignment
// slots. In fact, we are not; any empty alignment slots were calculated in
// Compiler::lvaAssignFrameOffsets() and its callees.
int calleeSaveSpDeltaUnaligned = totalFrameSize - compiler->compLclFrameSize;
if (genSaveFpLrWithAllCalleeSavedRegisters)
{
JITDUMP("Frame type 5 (save FP/LR at top). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
// This case is much simpler, because we allocate space for the callee-saved register area, including
// FP/LR. Note the SP adjustment might be SUB or be folded into the first store as a predecrement.
// Then, we use a single SUB to establish the rest of the frame. We need to be careful about where
// to establish the frame pointer, as there is a limit of 2040 bytes offset from SP to FP in the
// unwind codes when FP is established.
frameType = 5;
}
else
{
JITDUMP("Frame type 3 (save FP/LR at bottom). #outsz=%d; #framesz=%d; LclFrameSize=%d\n",
unsigned(compiler->lvaOutgoingArgSpaceSize), totalFrameSize, compiler->compLclFrameSize);
frameType = 3;
calleeSaveSpDeltaUnaligned -= 2 * REGSIZE_BYTES; // 2 for FP, LR which we'll save later.
// We'll take care of these later, but callee-saved regs code shouldn't see them.
maskSaveRegsInt &= ~(RBM_FP | RBM_LR);
}
assert(calleeSaveSpDeltaUnaligned >= 0);
assert((calleeSaveSpDeltaUnaligned % 8) == 0); // It better at least be 8 byte aligned.
calleeSaveSpDelta = AlignUp((UINT)calleeSaveSpDeltaUnaligned, STACK_ALIGN);
offset = calleeSaveSpDelta - calleeSaveSpDeltaUnaligned;
JITDUMP(" calleeSaveSpDelta=%d, offset=%d\n", calleeSaveSpDelta, offset);
// At most one alignment slot between SP and where we store the callee-saved registers.
assert((offset == 0) || (offset == REGSIZE_BYTES));
}
}
else
{
// No frame pointer (no chaining).
assert((maskSaveRegsInt & RBM_FP) == 0);
assert((maskSaveRegsInt & RBM_LR) != 0);
// Note that there is no pre-indexed save_lrpair unwind code variant, so we can't allocate the frame using
// 'stp' if we only have one callee-saved register plus LR to save.
NYI("Frame without frame pointer");
offset = 0;
}
assert(frameType != 0);
const int calleeSaveSpOffset = offset;
JITDUMP(" offset=%d, calleeSaveSpDelta=%d\n", offset, calleeSaveSpDelta);
genSaveCalleeSavedRegistersHelp(maskSaveRegsInt | maskSaveRegsFloat, offset, -calleeSaveSpDelta);
offset += genCountBits(maskSaveRegsInt | maskSaveRegsFloat) * REGSIZE_BYTES;
// For varargs, home the incoming arg registers last. Note that there is nothing to unwind here,
// so we just report "NOP" unwind codes. If there's no more frame setup after this, we don't
// need to add codes at all.
if (compiler->info.compIsVarArgs)
{
JITDUMP(" compIsVarArgs=true\n");
// There are 8 general-purpose registers to home, thus 'offset' must be 16-byte aligned here.
assert((offset % 16) == 0);
for (regNumber reg1 = REG_ARG_FIRST; reg1 < REG_ARG_LAST; reg1 = REG_NEXT(REG_NEXT(reg1)))
{
regNumber reg2 = REG_NEXT(reg1);
// stp REG, REG + 1, [SP, #offset]
GetEmitter()->emitIns_R_R_R_I(INS_stp, EA_PTRSIZE, reg1, reg2, REG_SPBASE, offset);
compiler->unwindNop();
offset += 2 * REGSIZE_BYTES;
}
}
// By default, we'll establish the frame pointer chain. (Note that currently frames without FP are NYI.)
bool establishFramePointer = true;
// If we do establish the frame pointer, what is the amount we add to SP to do so?
unsigned offsetSpToSavedFp = 0;
if (frameType == 1)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
assert(offsetSpToSavedFp == 0);
}
else if (frameType == 2)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
else if (frameType == 3)
{
assert(!genSaveFpLrWithAllCalleeSavedRegisters);
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
if (compiler->lvaOutgoingArgSpaceSize > 504)
{
// We can't do "stp fp,lr,[sp,#outsz]" because #outsz is too big.
// If compiler->lvaOutgoingArgSpaceSize is not aligned, we need to align the SP adjustment.
assert(remainingFrameSz > (int)compiler->lvaOutgoingArgSpaceSize);
int spAdjustment2Unaligned = remainingFrameSz - compiler->lvaOutgoingArgSpaceSize;
int spAdjustment2 = (int)roundUp((unsigned)spAdjustment2Unaligned, STACK_ALIGN);
int alignmentAdjustment2 = spAdjustment2 - spAdjustment2Unaligned;
assert((alignmentAdjustment2 == 0) || (alignmentAdjustment2 == 8));
JITDUMP(" spAdjustment2=%d\n", spAdjustment2);
genPrologSaveRegPair(REG_FP, REG_LR, alignmentAdjustment2, -spAdjustment2, false, initReg, pInitRegZeroed);
offset += spAdjustment2;
// Now subtract off the #outsz (or the rest of the #outsz if it was unaligned, and the above "sub"
// included some of it)
int spAdjustment3 = compiler->lvaOutgoingArgSpaceSize - alignmentAdjustment2;
assert(spAdjustment3 > 0);
assert((spAdjustment3 % 16) == 0);
JITDUMP(" alignmentAdjustment2=%d\n", alignmentAdjustment2);
genEstablishFramePointer(alignmentAdjustment2, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
JITDUMP(" spAdjustment3=%d\n", spAdjustment3);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind
// info.
genStackPointerAdjustment(-spAdjustment3, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += spAdjustment3;
}
else
{
genPrologSaveRegPair(REG_FP, REG_LR, compiler->lvaOutgoingArgSpaceSize, -remainingFrameSz, false, initReg,
pInitRegZeroed);
offset += remainingFrameSz;
offsetSpToSavedFp = compiler->lvaOutgoingArgSpaceSize;
}
}
else if (frameType == 4)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
}
else if (frameType == 5)
{
assert(genSaveFpLrWithAllCalleeSavedRegisters);
offsetSpToSavedFp = calleeSaveSpDelta - (compiler->info.compIsVarArgs ? MAX_REG_ARG * REGSIZE_BYTES : 0) -
2 * REGSIZE_BYTES; // -2 for FP, LR
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
// We just established the frame pointer chain; don't do it again.
establishFramePointer = false;
int remainingFrameSz = totalFrameSize - calleeSaveSpDelta;
assert(remainingFrameSz > 0);
assert((remainingFrameSz % 16) == 0); // this is guaranteed to be 16-byte aligned because each component --
// totalFrameSize and calleeSaveSpDelta -- is 16-byte aligned.
JITDUMP(" remainingFrameSz=%d\n", remainingFrameSz);
// We've already established the frame pointer, so no need to report the stack pointer change to unwind info.
genStackPointerAdjustment(-remainingFrameSz, initReg, pInitRegZeroed, /* reportUnwindData */ false);
offset += remainingFrameSz;
}
else
{
unreached();
}
if (establishFramePointer)
{
JITDUMP(" offsetSpToSavedFp=%d\n", offsetSpToSavedFp);
genEstablishFramePointer(offsetSpToSavedFp, /* reportUnwindData */ true);
}
assert(offset == totalFrameSize);
// Save off information about the frame for later use
//
compiler->compFrameInfo.frameType = frameType;
compiler->compFrameInfo.calleeSaveSpOffset = calleeSaveSpOffset;
compiler->compFrameInfo.calleeSaveSpDelta = calleeSaveSpDelta;
compiler->compFrameInfo.offsetSpToSavedFp = offsetSpToSavedFp;
#endif // TARGET_ARM64
}
/*****************************************************************************
*
* Generates code for a function epilog.
*
* Please consult the "debugger team notification" comment in genFnProlog().
*/
void CodeGen::genFnEpilog(BasicBlock* block)
{
#ifdef DEBUG
if (verbose)
printf("*************** In genFnEpilog()\n");
#endif // DEBUG
ScopedSetVariable<bool> _setGeneratingEpilog(&compiler->compGeneratingEpilog, true);
VarSetOps::Assign(compiler, gcInfo.gcVarPtrSetCur, GetEmitter()->emitInitGCrefVars);
gcInfo.gcRegGCrefSetCur = GetEmitter()->emitInitGCrefRegs;
gcInfo.gcRegByrefSetCur = GetEmitter()->emitInitByrefRegs;
#ifdef DEBUG
if (compiler->opts.dspCode)
printf("\n__epilog:\n");
if (verbose)
{
printf("gcVarPtrSetCur=%s ", VarSetOps::ToString(compiler, gcInfo.gcVarPtrSetCur));
dumpConvertedVarSet(compiler, gcInfo.gcVarPtrSetCur);
printf(", gcRegGCrefSetCur=");
printRegMaskInt(gcInfo.gcRegGCrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegGCrefSetCur);
printf(", gcRegByrefSetCur=");
printRegMaskInt(gcInfo.gcRegByrefSetCur);
GetEmitter()->emitDispRegSet(gcInfo.gcRegByrefSetCur);
printf("\n");
}
#endif // DEBUG
bool jmpEpilog = ((block->bbFlags & BBF_HAS_JMP) != 0);
GenTree* lastNode = block->lastNode();
// Method handle and address info used in case of jump epilog
CORINFO_METHOD_HANDLE methHnd = nullptr;
CORINFO_CONST_LOOKUP addrInfo;
addrInfo.addr = nullptr;
addrInfo.accessType = IAT_VALUE;
if (jmpEpilog && lastNode->gtOper == GT_JMP)
{
methHnd = (CORINFO_METHOD_HANDLE)lastNode->AsVal()->gtVal1;
compiler->info.compCompHnd->getFunctionEntryPoint(methHnd, &addrInfo);
}
#ifdef TARGET_ARM
// We delay starting the unwind codes until we have an instruction which we know
// needs an unwind code. In particular, for large stack frames in methods without
// localloc, the sequence might look something like this:
// movw r3, 0x38e0
// add sp, r3
// pop {r4,r5,r6,r10,r11,pc}
// In this case, the "movw" should not be part of the unwind codes, since it will
// be a NOP, and it is a waste to start with a NOP. Note that calling unwindBegEpilog()
// also sets the current location as the beginning offset of the epilog, so every
// instruction afterwards needs an unwind code. In the case above, if you call
// unwindBegEpilog() before the "movw", then you must generate a NOP for the "movw".
bool unwindStarted = false;
// Tear down the stack frame
if (compiler->compLocallocUsed)
{
if (!unwindStarted)
{
compiler->unwindBegEpilog();
unwindStarted = true;
}
// mov R9 into SP
inst_Mov(TYP_I_IMPL, REG_SP, REG_SAVED_LOCALLOC_SP, /* canSkip */ false);
compiler->unwindSetFrameReg(REG_SAVED_LOCALLOC_SP, 0);
}
if (jmpEpilog ||
genStackAllocRegisterMask(compiler->compLclFrameSize, regSet.rsGetModifiedRegsMask() & RBM_FLT_CALLEE_SAVED) ==
RBM_NONE)
{
genFreeLclFrame(compiler->compLclFrameSize, &unwindStarted);
}
if (!unwindStarted)
{
// If we haven't generated anything yet, we're certainly going to generate a "pop" next.
compiler->unwindBegEpilog();
unwindStarted = true;
}
if (jmpEpilog && lastNode->gtOper == GT_JMP && addrInfo.accessType == IAT_RELPVALUE)
{
// IAT_RELPVALUE jump at the end is done using relative indirection, so,
// additional helper register is required.
// We use LR just before it is going to be restored from stack, i.e.
//
// movw r12, laddr
// movt r12, haddr
// mov lr, r12
// ldr r12, [r12]
// add r12, r12, lr
// pop {lr}
// ...
// bx r12
regNumber indCallReg = REG_R12;
regNumber vptrReg1 = REG_LR;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
GetEmitter()->emitIns_Mov(INS_mov, EA_PTRSIZE, vptrReg1, indCallReg, /* canSkip */ false);
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
GetEmitter()->emitIns_R_R(INS_add, EA_PTRSIZE, indCallReg, vptrReg1);
}
genPopCalleeSavedRegisters(jmpEpilog);
if (regSet.rsMaskPreSpillRegs(true) != RBM_NONE)
{
// We had better not have used a pop PC to return; otherwise, this will be unreachable code
noway_assert(!genUsedPopToReturn);
int preSpillRegArgSize = genCountBits(regSet.rsMaskPreSpillRegs(true)) * REGSIZE_BYTES;
inst_RV_IV(INS_add, REG_SPBASE, preSpillRegArgSize, EA_PTRSIZE);
compiler->unwindAllocStack(preSpillRegArgSize);
}
if (jmpEpilog)
{
// We had better not have used a pop PC to return; otherwise, this will be unreachable code
noway_assert(!genUsedPopToReturn);
}
#else // TARGET_ARM64
compiler->unwindBegEpilog();
genPopCalleeSavedRegistersAndFreeLclFrame(jmpEpilog);
#endif // TARGET_ARM64
if (jmpEpilog)
{
SetHasTailCalls(true);
noway_assert(block->bbJumpKind == BBJ_RETURN);
noway_assert(block->GetFirstLIRNode() != nullptr);
/* figure out what jump we have */
GenTree* jmpNode = lastNode;
#if !FEATURE_FASTTAILCALL
noway_assert(jmpNode->gtOper == GT_JMP);
#else // FEATURE_FASTTAILCALL
// armarch
// If jmpNode is GT_JMP then gtNext must be null.
// If jmpNode is a fast tail call, gtNext need not be null since it could have embedded stmts.
noway_assert((jmpNode->gtOper != GT_JMP) || (jmpNode->gtNext == nullptr));
// Could either be a "jmp method" or "fast tail call" implemented as epilog+jmp
noway_assert((jmpNode->gtOper == GT_JMP) ||
((jmpNode->gtOper == GT_CALL) && jmpNode->AsCall()->IsFastTailCall()));
// The next block is associated with this "if" stmt
if (jmpNode->gtOper == GT_JMP)
#endif // FEATURE_FASTTAILCALL
{
// Simply emit a jump to the methodHnd. This is similar to a call so we can use
// the same descriptor with some minor adjustments.
assert(methHnd != nullptr);
assert(addrInfo.addr != nullptr);
#ifdef TARGET_ARMARCH
emitter::EmitCallType callType;
void* addr;
regNumber indCallReg;
switch (addrInfo.accessType)
{
case IAT_VALUE:
if (validImmForBL((ssize_t)addrInfo.addr))
{
// Simple direct call
callType = emitter::EC_FUNC_TOKEN;
addr = addrInfo.addr;
indCallReg = REG_NA;
break;
}
// otherwise the target address doesn't fit in an immediate
// so we have to burn a register...
FALLTHROUGH;
case IAT_PVALUE:
// Load the address into a register, load indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
callType = emitter::EC_INDIR_R;
indCallReg = REG_INDIRECT_CALL_TARGET_REG;
addr = NULL;
instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, indCallReg, (ssize_t)addrInfo.addr);
if (addrInfo.accessType == IAT_PVALUE)
{
GetEmitter()->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, indCallReg, indCallReg, 0);
regSet.verifyRegUsed(indCallReg);
}
break;
case IAT_RELPVALUE:
{
// Load the address into a register, load relative indirect and call through a register
// We have to use R12 since we assume the argument registers are in use
// LR is used as helper register right before it is restored from stack, thus,
// all relative address calculations are performed before LR is restored.
callType = emitter::EC_INDIR_R;
indCallReg = REG_R12;
addr = NULL;
regSet.verifyRegUsed(indCallReg);
break;
}
case IAT_PPVALUE:
default:
NO_WAY("Unsupported JMP indirection");
}
/* Simply emit a jump to the methodHnd. This is similar to a call so we can use
* the same descriptor with some minor adjustments.
*/
// clang-format off
GetEmitter()->emitIns_Call(callType,
methHnd,
INDEBUG_LDISASM_COMMA(nullptr)
addr,
0, // argSize
EA_UNKNOWN, // retSize
#if defined(TARGET_ARM64)
EA_UNKNOWN, // secondRetSize
#endif
gcInfo.gcVarPtrSetCur,
gcInfo.gcRegGCrefSetCur,
gcInfo.gcRegByrefSetCur,
DebugInfo(),
indCallReg, // ireg
REG_NA, // xreg
0, // xmul
0, // disp
true); // isJump
// clang-format on
CLANG_FORMAT_COMMENT_ANCHOR;
#endif // TARGET_ARMARCH
}
#if FEATURE_FASTTAILCALL
else
{
genCallInstruction(jmpNode->AsCall());
}
#endif // FEATURE_FASTTAILCALL
}
else
{
#ifdef TARGET_ARM
if (!genUsedPopToReturn)
{
// If we did not use a pop to return, then we did a "pop {..., lr}" instead of "pop {..., pc}",
// so we need a "bx lr" instruction to return from the function.
inst_RV(INS_bx, REG_LR, TYP_I_IMPL);
compiler->unwindBranch16();
}
#else // TARGET_ARM64
inst_RV(INS_ret, REG_LR, TYP_I_IMPL);
compiler->unwindReturn(REG_LR);
#endif // TARGET_ARM64
}
compiler->unwindEndEpilog();
}
// Returns the size (in bytes) of the operand 'op'.
// 'alignmentWB' is an out param that receives the operand's required alignment.
unsigned CodeGenInterface::InferOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
if (op->gtType == TYP_STRUCT || op->OperIsCopyBlkOp())
{
opSize = InferStructOpSizeAlign(op, &alignment);
}
else
{
alignment = genTypeAlignments[op->TypeGet()];
opSize = genTypeSizes[op->TypeGet()];
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
// Returns the size (in bytes) of the struct operand 'op'.
// 'alignmentWB' is an out param that receives the operand's required alignment.
unsigned CodeGenInterface::InferStructOpSizeAlign(GenTree* op, unsigned* alignmentWB)
{
unsigned alignment = 0;
unsigned opSize = 0;
while (op->gtOper == GT_COMMA)
{
op = op->AsOp()->gtOp2;
}
if (op->gtOper == GT_OBJ)
{
CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->GetLayout()->GetClassHandle();
opSize = op->AsObj()->GetLayout()->GetSize();
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else if (op->gtOper == GT_LCL_VAR)
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(op->AsLclVarCommon());
assert(varDsc->lvType == TYP_STRUCT);
opSize = varDsc->lvSize();
#ifndef TARGET_64BIT
if (varDsc->lvStructDoubleAlign)
{
alignment = TARGET_POINTER_SIZE * 2;
}
else
#endif // !TARGET_64BIT
{
alignment = TARGET_POINTER_SIZE;
}
}
else if (op->gtOper == GT_MKREFANY)
{
opSize = TARGET_POINTER_SIZE * 2;
alignment = TARGET_POINTER_SIZE;
}
else if (op->IsArgPlaceHolderNode())
{
CORINFO_CLASS_HANDLE clsHnd = op->AsArgPlace()->gtArgPlaceClsHnd;
assert(clsHnd != 0);
opSize = roundUp(compiler->info.compCompHnd->getClassSize(clsHnd), TARGET_POINTER_SIZE);
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else
{
assert(!"Unhandled gtOper");
opSize = TARGET_POINTER_SIZE;
alignment = TARGET_POINTER_SIZE;
}
assert(opSize != 0);
assert(alignment != 0);
(*alignmentWB) = alignment;
return opSize;
}
#endif // TARGET_ARMARCH
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
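For intuition, the new sequence corresponds to the following branch-free computation (an illustrative C-style sketch only; `Mod16` is a made-up helper name, not code from this PR):
```cpp
// Signed a % 16 where the divisor is a power of two, mirroring the and/negs/and/csneg pattern above.
int Mod16(int a)
{
    int pos = a & 15;           // remainder assuming a > 0
    int neg = -((-a) & 15);     // remainder assuming a <= 0: negate, mask, negate back
    return (a > 0) ? pos : neg; // csneg selects one result based on the flags set by "negs"
}
```
(Negating `INT_MIN` is undefined behavior in C; the JIT emits this at the machine level, where the negation simply wraps.)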
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/gentree.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
// For the 64-bit targets we will only use GT_CNS_INT, as it is used to represent all the possible sizes
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
GTK_EXOP = 0x10, // Indicates an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
// by adding non-node fields to the unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
* TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node.
* The values are such that they don't overlap with helper's or user function's handle.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
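// For example (illustrative): lowering an access such as "obj.s.f", where "s" is a struct-typed object
// field and "f" is a field of that struct, yields the two-element sequence [s, f], in dereference order.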
class FieldSeqNode
{
public:
enum class FieldKind : uintptr_t
{
Instance = 0, // An instance field, object or struct.
SimpleStatic = 1, // Simple static field - the handle represents a unique location.
SharedStatic = 2, // Static field on a shared generic type: "Class<__Canon>.StaticField".
};
private:
static const uintptr_t FIELD_KIND_MASK = 0b11;
static_assert_no_msg(sizeof(CORINFO_FIELD_HANDLE) == sizeof(uintptr_t));
uintptr_t m_fieldHandleAndKind;
FieldSeqNode* m_next;
public:
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind);
FieldKind GetKind() const
{
return static_cast<FieldKind>(m_fieldHandleAndKind & FIELD_KIND_MASK);
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(GetFieldHandleValue() != NO_FIELD_HANDLE);
return GetFieldHandleValue();
}
CORINFO_FIELD_HANDLE GetFieldHandleValue() const
{
return CORINFO_FIELD_HANDLE(m_fieldHandleAndKind & ~FIELD_KIND_MASK);
}
FieldSeqNode* GetNext() const
{
return m_next;
}
bool IsStaticField() const
{
return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic);
}
bool IsSharedStaticField() const
{
return GetKind() == FieldKind::SharedStatic;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
// Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable.
// Note that there is a one-to-one relationship between the field handle and the field kind, so
// we do not need to mask away the latter for comparison purposes.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(fsn.m_fieldHandleAndKind) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHandleAndKind == fsn2.m_fieldHandleAndKind && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd,
FieldSeqNode::FieldKind fieldKind = FieldSeqNode::FieldKind::Instance);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
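// For example (illustrative): Append([F1], [F2, F3]) yields the canonical sequence [F1, F2, F3], while
// Append(NotAField(), x) or Append(x, NotAField()) yields NotAField().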
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
};
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
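// For example, the last-use bit for regIndex 2 is (1 << (MULTIREG_LAST_USE_SHIFT + 2)), i.e. GTF_VAR_MULTIREG_DEATH2.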
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
GTF_VAR_CAST = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (the variable node's type may not be that of the local)
GTF_VAR_ITERATOR = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
GTF_IND_VOLATILE = 0x40000000, // GT_IND -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF |
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ARR_ADDR_NONNULL = 0x80000000, // GT_ARR_ADDR -- this array's address is not null
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
GTF_ICON_FIELD_SEQ = 0x11000000, // <--------> -- constant is a FieldSeqNode* (used only as VNHandle)
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is a volatile block operation
GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYNBLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proven this check is always in-bounds
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables can not.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
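// For example (illustrative): an assignment to a local sets only GTF_ASG and is not reported as globally
// visible, an assignment to a static or heap location sets GTF_ASG | GTF_GLOB_REF and is reported, and any
// subtree carrying GTF_CALL or GTF_EXCEPT is always reported.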
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GT_STRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
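// For example, gtCSEnum == +3 marks a use of CSE candidate #3, while gtCSEnum == -3 marks its
// definition (see the IS_CSE_USE/IS_CSE_DEF/TO_CSE_DEF macros above).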
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
// Optimized copy function, to avoid the SetCosts() function comparisons, and to make it clearer that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes, before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg(Compiler* comp) const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
regMaskTP gtGetContainedRegMask();
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
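// Example usage of the OperIs overloads above (illustrative): "node->OperIs(GT_ADD, GT_SUB, GT_MUL)"
// returns true if the node's oper is any of the listed values.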
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(AreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(AreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(AreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Return if binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Return if binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !FEATURE_SIMD
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
// OperIsIndir() returns true for "true" indirections, GT_IND and GT_STOREIND (i.e. either a load to
// or a store from a single register), as well as for other indirection nodes such as GT_BLK, etc.
// and GT_NULLCHECK.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
bool OperIsSsaDef() const
{
return OperIs(GT_ASG, GT_CALL);
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount(Compiler* comp) const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex) const;
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex) const;
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex) const;
public:
bool IsLastUse(int regIndex) const;
bool HasLastUse() const;
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
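// ChangeType: change the type of this node. If the node is a GT_COMMA, the new type is also
// propagated down the chain of second operands, so each COMMA keeps the type of the value it
// ultimately produces.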
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
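// BashToConst: morph this node in place into a constant node holding "value" (GT_CNS_INT,
// GT_CNS_LNG or GT_CNS_DBL, as appropriate for the requested type). BashToZeroConst does the
// same with a zero value of the given type.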
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
bool IsArrayAddr(GenTreeArrAddr** pArrAddr);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntegralConstUnsignedPow2() const;
inline bool IsIntegralConstAbsPow2() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
enum class VisitResult
{
Abort = false,
Continue = true
};
// Visits each operand of this node. The operand must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
// node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult)
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
// This function is generally more efficient than the operand iterator and should be preferred over that API for
// hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
//
// The order of the PHI_ARG uses is not currently relevant and it may be
// the same or not as the order of the predecessor blocks.
//
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
// Represents a list of fields constituting a struct, when it is passed as an argument.
//
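// As an illustration (independent of any particular ABI): a struct with an int field at offset 0
// and a float field at offset 4 that is passed as a FIELD_LIST would carry two uses, one with
// type TYP_INT at offset 0 and one with type TYP_FLOAT at offset 4.
//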
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
//
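// A minimal usage sketch (illustrative only; "node", "oldOperand" and "newOperand" are assumed to
// be provided by the caller): replacing a particular operand through its use edge.
//
//    for (GenTree** useEdge : node->UseEdges())
//    {
//        if (*useEdge == oldOperand)
//        {
//            *useEdge = newOperand;
//        }
//    }
//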
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
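//
// A minimal usage sketch (illustrative only; "node" is assumed to be provided by the caller):
// counting the operands of a node in execution order.
//
//    unsigned operandCount = 0;
//    for (GenTree* operand : node->Operands())
//    {
//        operandCount++;
//    }
//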
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of types are unary operators with optional arguments. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization for this node,
// and if so sets the flags GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
// whereas reg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
* It's used to hold both int constants and pointer handle constants.
* For the 64-bit targets we only use GT_CNS_INT, as it is used to represent all the possible sizes.
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents a target address, this holds the method handle to that target, which is used
// to fetch the target method name and display it in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function truncates the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG; otherwise it sets the value
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
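// For example (illustrative only): when two TYP_INT constants are folded on a 64 bit host, the
// sum is computed as an ssize_t; calling SetValueTruncating with that sum on a TYP_INT node
// stores static_cast<int32_t>(sum), so a value of INT32_MAX + 1 wraps around to INT32_MIN.
//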
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
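// For illustration: a MultiRegSpillFlags value of 0x6 (binary 0110) encodes GTF_SPILLED for
// register 0 (bits 0-1 == PACKED_GTF_SPILLED) and GTF_SPILL for register 1
// (bits 2-3 == PACKED_GTF_SPILL).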
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex) const
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given LclVar node.
//
// Arguments:
// from - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // The sequence of field accesses that this LclFld node represents.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
// GenTreeCast - conversion to a different type (GT_CAST).
//
// This node represents all "conv[.ovf].{type}[.un]" IL opcodes.
//
// There are four semantically significant values that determine what it does:
//
// 1) "genActualType(CastOp())" - the type being cast from.
// 2) "gtCastType" - the type being cast to.
// 3) "IsUnsigned" (the "GTF_UNSIGNED" flag) - whether the cast is "unsigned".
// 4) "gtOverflow" (the "GTF_OVERFLOW" flag) - whether the cast is checked.
//
// Different "kinds" of casts use these values differently; not all are always
// meaningful or legal:
//
// 1) For casts from FP types, "IsUnsigned" will always be "false".
// 2) Checked casts use "IsUnsigned" to represent the fact the type being cast
// from is unsigned. The target type's signedness is similarly significant.
// 3) For unchecked casts, "IsUnsigned" is significant for "int -> long", where
// it decides whether the cast sign- or zero-extends its source, and "integer
// -> FP" cases. For all other unchecked casts, "IsUnsigned" is meaningless.
// 4) For unchecked casts, signedness of the target type is only meaningful if
// the cast is to an FP or small type. In the latter case (and everywhere
// else in IR) it decides whether the value will be sign- or zero-extended.
//
// For additional context on "GT_CAST"'s semantics, see "IntegralRange::ForCast"
// methods and "GenIntCastDesc"'s constructor.
//
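// As a concrete illustration: the IL opcode "conv.ovf.i4.un" applied to a TYP_LONG value is
// represented as a GT_CAST with "gtCastType" == TYP_INT, "IsUnsigned()" set (the source is
// treated as unsigned) and "gtOverflow()" set (the conversion is range checked).
//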
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
// Note that this is an address, i. e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
    GTF_CALL_M_NOGCCHECK               = 0x00000020, // not a call for computing full interruptibility and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
    // A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For example,
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // CoreRT managed calli needs transformation, that checks
// special bit in calli address. If it is set, then it is necessary
// to restore real function address and load hidden argument
// as the first argument for calli. It is CoreRT replacement for instantiating
// stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
    GTF_CALL_M_EXP_RUNTIME_LOOKUP      = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
    GTF_CALL_M_LATE_DEVIRT             = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
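// Illustrative sketch (not part of this header; "call" is a hypothetical GenTreeCall*): the
// operators above let gtCallMoreFlags be combined and tested like a plain bitmask while
// remaining strongly typed.
//
//   call->gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
//   if ((call->gtCallMoreFlags & (GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV)) != 0)
//   {
//       // ...
//   }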
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
// Any remaining elements in m_regType[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get i'th ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
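// Illustrative sketch (not part of this header; "retTypeDesc" is a hypothetical, already
// initialized ReturnTypeDesc*): the usual query pattern walks the populated return registers
// in order.
//
//   unsigned regCount = retTypeDesc->GetReturnRegCount();
//   for (unsigned i = 0; i < regCount; ++i)
//   {
//       var_types regType = retTypeDesc->GetReturnRegType(i);
//       regNumber reg     = retTypeDesc->GetABIReturnReg(i);
//       // ...
//   }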
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
// If changing this enum also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
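// Illustrative sketch (not part of this header; "call" is a hypothetical GenTreeCall*):
// because UseList exposes begin()/end(), both argument lists work with range-based for.
//
//   for (GenTreeCall::Use& use : call->Args())
//   {
//       GenTree* argNode = use.GetNode();
//       // ...
//   }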
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of i'th return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument, as dictated by its calling convention
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// This method may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_LOONGARCH64)
return (gtType == TYP_STRUCT) && (gtReturnTypeDesc.GetReturnRegCount() > 1);
#else
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#endif
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
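// Illustrative sketch (not part of this header; "call" is a hypothetical GenTreeCall*):
// a typical consumer pairs HasMultiRegRetVal() with the per-index register accessors.
//
//   if (call->HasMultiRegRetVal())
//   {
//       unsigned regCount = call->GetReturnTypeDesc()->GetReturnRegCount();
//       for (unsigned i = 0; i < regCount; ++i)
//       {
//           regNumber reg = call->GetRegNumByIdx(i);
//           // ...
//       }
//   }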
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
// Note that the distinction of whether tail prefixed or an implicit tail call
// is maintained on a call node till fgMorphCall() after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
// This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
// This method returning "true" implies that importer has performed tail call checks
// and is providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
// That is, can be tail called though not explicitly prefixed with "tail" prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
// On XARCH we disassemble it from the callsite, except for tailcalls, which need an indirection cell.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as on x64 hardware, so only use the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
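// Illustrative sketch (not part of this header; "call" is a hypothetical GenTreeCall*):
// a control-flow-guard expansion can branch on the kind computed above.
//
//   switch (call->GetCFGCallKind())
//   {
//       case CFGCallKind::ValidateAndCall:
//           // validate the target first, then emit the original (indirect) call
//           break;
//       case CFGCallKind::Dispatch:
//           // route the call through the CFG dispatch helper instead
//           break;
//   }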
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
gtRetBufArg = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
GenTree* GetLclRetBufArgNode() const
{
if (gtRetBufArg == nullptr)
{
return nullptr;
}
assert(HasRetBufArg());
GenTree* lclRetBufArgNode = gtRetBufArg->GetNode();
switch (lclRetBufArgNode->OperGet())
{
// Get the true value from setup args
case GT_ASG:
return lclRetBufArgNode->AsOp()->gtGetOp2();
case GT_STORE_LCL_VAR:
return lclRetBufArgNode->AsUnOp()->gtGetOp1();
// Get the value from putarg wrapper nodes
case GT_PUTARG_REG:
case GT_PUTARG_STK:
return lclRetBufArgNode->AsOp()->gtGetOp1();
// Otherwise the node should be in the Use*
default:
return lclRetBufArgNode;
}
}
void SetLclRetBufArg(Use* retBufArg);
Use* gtRetBufArg; // The argument that holds return buffer argument
};
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flag of each of the
// return register is stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of i'th register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
//
var_types GetRegType(unsigned index) const
{
assert(index < 2);
// The type of register is usually the same as GenTree type, since GenTreeMultiRegOp usually defines a single
// reg.
// The special case is when we have TYP_LONG, which may be a MUL_LONG, or a DOUBLE arg passed as LONG,
// in which case we need to separate them into int for each index.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
// Note that this constructor takes ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
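// Illustrative sketch (not part of this header; "multiOp" is a hypothetical GenTreeMultiOp*):
// Op() is 1-based, while Operands() iterates the same operands in storage order.
//
//   for (GenTree* operand : multiOp->Operands())
//   {
//       // ...
//   }
//   GenTree* firstOperand = multiOp->Op(1); // the node seen on the first iteration above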
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
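// Illustrative sketch (not part of this header; "comp" and the operand nodes are hypothetical):
// callers fill the builder by index and then move it into a GenTreeJitIntrinsic-derived
// constructor, which takes ownership of the built operand array.
//
//   IntrinsicNodeBuilder nodeBuilder(comp->getAllocator(CMK_ASTNode), 3);
//   nodeBuilder.AddOperand(0, op1);
//   nodeBuilder.AddOperand(1, op2);
//   nodeBuilder.AddOperand(2, op3);
//   // ... then pass std::move(nodeBuilder) to, e.g., a GenTreeHWIntrinsic constructor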
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
unsigned char gtAuxiliaryJitType; // For intrinsics than need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
Initialize(hwIntrinsicID, isSimdAsHWIntrinsic);
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
Initialize(hwIntrinsicID, isSimdAsHWIntrinsic);
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
// new dynamic array if the operands do not fit into an inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
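// Illustrative sketch (not part of this header; "node" is a hypothetical GenTreeHWIntrinsic*
// currently holding two operands, and the intrinsic ids are only examples):
// ChangeHWIntrinsicId keeps the operand count, while ResetHWIntrinsicId rebuilds it.
//
//   node->ChangeHWIntrinsicId(NI_AdvSimd_Add, op1, op2); // still two operands
//   node->ResetHWIntrinsicId(NI_AdvSimd_Negate, op1);    // now a single operand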
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
void Initialize(NamedIntrinsic intrinsicId, bool isSimdAsHWIntrinsic)
{
SetHWIntrinsicId(intrinsicId);
bool isStore = OperIsMemoryStore();
bool isLoad = OperIsMemoryLoad();
if (isStore || isLoad)
{
gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT);
if (isStore)
{
gtFlags |= GTF_ASG;
}
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
};
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
// GenTreeArrAddr - GT_ARR_ADDR, carries information about the array type from morph to VN.
// This node is just a wrapper (similar to GenTreeBox), the real address
// expression is contained in its first operand.
//
struct GenTreeArrAddr : GenTreeUnOp
{
private:
CORINFO_CLASS_HANDLE m_elemClassHandle; // The array element class. Currently only used for arrays of TYP_STRUCT.
var_types m_elemType; // The normalized (TYP_SIMD != TYP_STRUCT) array element type.
uint8_t m_firstElemOffset; // Offset to the first element of the array.
public:
GenTreeArrAddr(GenTree* addr, var_types elemType, CORINFO_CLASS_HANDLE elemClassHandle, uint8_t firstElemOffset)
: GenTreeUnOp(GT_ARR_ADDR, TYP_BYREF, addr DEBUGARG(/* largeNode */ false))
, m_elemClassHandle(elemClassHandle)
, m_elemType(elemType)
, m_firstElemOffset(firstElemOffset)
{
// Temporarily disable this assert. Tracking: https://github.com/dotnet/runtime/issues/67600
// assert(addr->TypeIs(TYP_BYREF) || addr->IsIntegralConst(0));
assert(((elemType == TYP_STRUCT) && (elemClassHandle != NO_CLASS_HANDLE)) ||
(elemClassHandle == NO_CLASS_HANDLE));
// We will only consider "addr" for CSE. This is more profitable and precise
// because ARR_ADDR can get its VN "polluted" by zero-offset field sequences.
SetDoNotCSE();
}
#if DEBUGGABLE_GENTREE
GenTreeArrAddr() : GenTreeUnOp()
{
}
#endif
GenTree*& Addr()
{
return gtOp1;
}
CORINFO_CLASS_HANDLE GetElemClassHandle() const
{
return m_elemClassHandle;
}
var_types GetElemType() const
{
return m_elemType;
}
uint8_t GetFirstElemOffset() const
{
return m_firstElemOffset;
}
void ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum* pInxVN);
private:
static void ParseArrayAddressWork(GenTree* tree,
Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset);
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value
// - an index value, and
// - the label to jump to if the index is out of range.
// - the "kind" of the throw block to branch to on failure
// It generates no result.
//
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
// Store some information about the array element type that was in the GT_INDEX node before morphing.
// Note that this information is also stored in the m_arrayInfoMap of the morphed IND node (that
// is marked with GTF_IND_ARR_INDEX), but that can be hard to find.
var_types gtInxType; // Save the GT_INDEX type
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length)
, gtIndRngFailBB(nullptr)
, gtThrowKind(kind)
, gtInxType(TYP_UNKNOWN)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns array reference, else "NULL".
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
// on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
// and can be any expression (though it is likely to be either a GenTreeArrIndex,
// node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
// 2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
// Indir is just an op, no additional data, but some additional abstractions
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
// These methods provide an interface to the indirection's address expression, which may
// be an address mode (GT_LEA) with base/index/scale/offset components.
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
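// As a rough guide (the actual heuristics live in Lowering, not here): small fixed-size block
// ops are typically expanded inline (BlkOpKindUnroll); larger ones may use rep movs/rep stos
// on xarch (BlkOpKindRepInstr) or call a memcpy/memset-style helper (BlkOpKindHelper, not
// available on x86).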
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
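// For illustration only (approximate shape): a "cpblk" with a non-constant size imports as
//     STORE_DYN_BLK(dstAddr, IND<struct>(srcAddr), sizeNode)
// while an "initblk" has the fill value (possibly wrapped in INIT_VAL) in place of the IND.
//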
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
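// For illustration only (an xarch-flavored sketch): a read-modify-write pattern such as
//     STOREIND(addr, ADD(IND(addr), 1))
// can be encoded as a single "add [addr], 1"; STOREIND_RMW_DST_IS_OP1 records that the first
// operand of the ADD is the memory destination candidate.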
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Placeholder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer statements, so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
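// For example (illustrative use only): dump code typically does something like
//     printf(FMT_STMT " ...", stmt->GetID());
// which prints "STMT00012 ..." for statement ID 12.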
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
enum class Kind : __int8{
Invalid, RepInstr, PartialRepInstr, Unroll, Push,
};
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return gtPutArgStkKind == Kind::Push;
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represents a struct argument that is split between register(s) and the stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - register index within the struct argument
//
// Return Value:
// Return regNumber of i'th register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th register of this struct argument
//
// Arguments:
// reg - reg number
// idx - register index within the struct argument
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// These are conceptually needed on any target that has multi-reg ops of any kind; it just so
// happens that, today, those are exactly the targets with multi-reg returns, so the code below
// is conditioned on FEATURE_MULTIREG_RET (there is no separate FEATURE_MULTIREG_OPS).
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of i'th position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to i'th position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for i'th position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(IsFieldHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
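// For context (a hedged note, not a normative spec): on xarch an ordered FLT/FLE would need an
// extra branch to exclude the unordered (NaN) case, whereas the swapped FGT/FGE form maps onto
// a single JA/JAE, so the backend prefers the swapped condition.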
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
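// For illustration: an unsigned GT_LT maps to ULT and a signed GT_LT to SLT, while GT_EQ/GT_NE
// always map to EQ/NE since equality is sign-insensitive; GT_TEST_EQ/GT_TEST_NE reuse the EQ/NE
// encodings as noted above.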
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
FNE, FEQ, FGE, FGT, FLT, FLE, NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
};
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
// Returns true iff the tree is a GT_CNS_DBL, with value of 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
// Like gtIconVal, the argument is of ssize_t, so it cannot check for
// long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
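// For illustration (hedged example): a node built for Vector128.Create(5) - a GT_HWINTRINSIC
// with NI_Vector128_Create and a single integral-constant operand - satisfies
// IsIntegralConstVector(5), and Vector128<int>.Zero satisfies IsIntegralConstVector(0).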
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
// True if this represents a SIMD vector whose elements are all zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
// This check parallels IsCnsNonZeroFltOrDbl, but spelling out the bit test here is easier
// to follow than negating that helper. Comparing the raw bits to zero matches only +0.0;
// -0.0 has its sign bit set and so fails the comparison.
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue == 0;
}
return false;
}
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0),
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given a GenTree node that represents an argument,
// enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
// false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
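// For illustration (hedged example): calling gtEffectiveVal() on COMMA(ASG(tmp, x), CALL(...))
// returns the CALL node, skipping the comma's side-effecting first operand (and, unless
// commaOnly is passed, any wrapping NOP as well).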
//-------------------------------------------------------------------------
// gtCommaAssignVal - find the value being assigned by a comma-wrapped assignment
//
// Returns:
// tree representing the value being assigned, if this tree represents a
// comma-wrapped local definition and use;
//
// the original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
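// For illustration: given COMMA(ASG(LCL_VAR V01, expr), LCL_VAR V01) this returns "expr";
// for any other shape the node itself is returned unchanged.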
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is presented.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether a call node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
//
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
//-----------------------------------------------------------------------------------
// IsMultiRegLclVar: whether a local var node defines multiple registers
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register defining local var
//
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIsScalarLocal())
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex) const
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
assert(!"Invalid regIndex for GetRegFromMultiRegNode");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - index of register whose type will be returned
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIsScalarLocal())
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL or GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex) const
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
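// Worked example (illustrative): with MULTIREG_LAST_USE_SHIFT == 26,
//   GetLastUseBit(0) == (1 << 26) == GTF_VAR_MULTIREG_DEATH0 (0x04000000)
//   GetLastUseBit(2) == (1 << 28) == GTF_VAR_MULTIREG_DEATH2 (0x10000000)
// so the four last-use bits form the contiguous range covered by GTF_VAR_DEATH_MASK.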
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex) const
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// HasLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse() const
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
//
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
//
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
//-------------------------------------------------------------------------
// IsIntegralConstUnsignedPow2: Determines whether the unsigned value of
//                              an integral constant is a power of 2.
//
// Return Value:
//     Returns true if the unsigned value of a GenTree's integral constant
//     is a power of 2.
//
// Notes:
//     Integral constant nodes store their value in signed form.
//     This should handle cases where an unsigned int was logically used in
//     user code.
//
inline bool GenTree::IsIntegralConstUnsignedPow2() const
{
if (IsIntegralConst())
{
return isPow2((UINT64)AsIntConCommon()->IntegralValue());
}
return false;
}
//-------------------------------------------------------------------------
// IsIntegralConstAbsPow2: Determines whether the absolute value of
//                         an integral constant is a power of 2.
//
// Return Value:
//     Returns true if the absolute value of a GenTree's integral constant
//     is a power of 2.
//
inline bool GenTree::IsIntegralConstAbsPow2() const
{
if (IsIntegralConst())
{
INT64 svalue = AsIntConCommon()->IntegralValue();
size_t value = (svalue == SSIZE_T_MIN) ? static_cast<size_t>(svalue) : static_cast<size_t>(abs(svalue));
return isPow2(value);
}
return false;
}
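// Examples (illustrative): a GT_CNS_INT of -8 yields abs(-8) == 8, a power of 2, so this
// returns true. SSIZE_T_MIN is special-cased because abs() would overflow on it; its raw
// unsigned bit pattern (0x8000000000000000 on 64-bit hosts) is itself a power of 2.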
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
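// Note: the test is on the raw bit pattern, so a constant of -0.0 (sign bit set) counts as
// non-zero here, while +0.0 does not.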
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX GenTree XX
XX XX
XX This is the node in the semantic tree graph. It represents the operation XX
XX corresponding to the node, and other information during code-gen. XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
/*****************************************************************************/
#ifndef _GENTREE_H_
#define _GENTREE_H_
/*****************************************************************************/
#include "vartype.h" // For "var_types"
#include "target.h" // For "regNumber"
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "valuenumtype.h"
#include "jitstd.h"
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
#include "layout.h"
#include "debuginfo.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
// build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
// making the debugging value supplied by them useless).
#ifndef DEBUGGABLE_GENTREE
#ifdef DEBUG
#define DEBUGGABLE_GENTREE 1
#else // !DEBUG
#define DEBUGGABLE_GENTREE 0
#endif // !DEBUG
#endif // !DEBUGGABLE_GENTREE
// The SpecialCodeKind enum is used to indicate the type of special (unique)
// target block that will be targeted by an instruction.
// These are used by:
// GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
// - these nodes have a field (gtThrowKind) to indicate which kind
// GenTreeOps nodes, for which codegen will generate the branch
// - it will use the appropriate kind based on the opcode, though it's not
// clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
//
enum SpecialCodeKind
{
SCK_NONE,
SCK_RNGCHK_FAIL, // target when range check fails
SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
SCK_ARITH_EXCPN, // target on arithmetic exception
SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
SCK_COUNT
};
/*****************************************************************************/
enum genTreeOps : BYTE
{
#define GTNODE(en, st, cm, ok) GT_##en,
#include "gtlist.h"
GT_COUNT,
#ifdef TARGET_64BIT
// GT_CNS_NATIVELONG is the gtOper symbol for GT_CNS_LNG or GT_CNS_INT, depending on the target.
// For the 64-bit targets we will only use GT_CNS_INT as it used to represent all the possible sizes
GT_CNS_NATIVELONG = GT_CNS_INT,
#else
// For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
// In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
GT_CNS_NATIVELONG = GT_CNS_LNG,
#endif
};
// The following enum defines a set of bit flags that can be used
// to classify expression tree nodes.
//
enum GenTreeOperKind
{
GTK_SPECIAL = 0x00, // special operator
GTK_LEAF = 0x01, // leaf operator
GTK_UNOP = 0x02, // unary operator
GTK_BINOP = 0x04, // binary operator
GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
GTK_COMMUTE = 0x08, // commutative operator
GTK_EXOP = 0x10, // Indicates that an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
// by adding non-node fields to unary or binary operator.
GTK_NOVALUE = 0x20, // node does not produce a value
GTK_MASK = 0xFF
};
// The following enum defines a set of bit flags that describe opers for the purposes
// of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
// making the table for those larger in Release builds. However, it resides in the same
// "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
//
enum GenTreeDebugOperKind
{
DBK_FIRST_FLAG = GTK_MASK + 1,
DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
DBK_MASK = ~GTK_MASK
};
/*****************************************************************************/
enum gtCallTypes : BYTE
{
CT_USER_FUNC, // User function
CT_HELPER, // Jit-helper
CT_INDIRECT, // Indirect call
CT_COUNT // fake entry (must be last)
};
#ifdef DEBUG
/*****************************************************************************
*
 * TargetHandleTypes are used to determine the type of handle present inside a GenTreeIntCon node.
 * The values are chosen so that they don't overlap with helper or user function handles.
*/
enum TargetHandleType : BYTE
{
THT_Unknown = 2,
THT_GSCookieCheck = 4,
THT_SetGSCookie = 6,
THT_IntializeArrayIntrinsics = 8
};
#endif
/*****************************************************************************/
struct BasicBlock;
enum BasicBlockFlags : unsigned __int64;
struct InlineCandidateInfo;
struct GuardedDevirtualizationCandidateInfo;
struct ClassProfileCandidateInfo;
struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;
static const AssertionIndex NO_ASSERTION_INDEX = 0;
//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
// index - 0-based index
// Return Value:
// 1-based AssertionIndex.
inline AssertionIndex GetAssertionIndex(unsigned index)
{
return (AssertionIndex)(index + 1);
}
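// For example, GetAssertionIndex(0) == 1: assertion tables are 0-based internally, while
// AssertionIndex 0 is reserved for NO_ASSERTION_INDEX.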
class AssertionInfo
{
// true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
unsigned short m_isNextEdgeAssertion : 1;
// 1-based index of the assertion
unsigned short m_assertionIndex : 15;
AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
: m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
{
assert(m_assertionIndex == assertionIndex);
}
public:
AssertionInfo() : AssertionInfo(false, 0)
{
}
AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
{
}
static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
{
// Ignore the edge information if there's no assertion
bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
return AssertionInfo(isNextEdge, assertionIndex);
}
void Clear()
{
m_isNextEdgeAssertion = 0;
m_assertionIndex = NO_ASSERTION_INDEX;
}
bool HasAssertion() const
{
return m_assertionIndex != NO_ASSERTION_INDEX;
}
AssertionIndex GetAssertionIndex() const
{
return m_assertionIndex;
}
bool IsNextEdgeAssertion() const
{
return m_isNextEdgeAssertion;
}
};
// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
// represents a (possibly) empty sequence of fields. The fields are in the order
// in which they are dereferenced. The first field may be an object field or a struct field;
// all subsequent fields must be struct fields.
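// For example (a sketch): lowering an instance access such as "obj.s.f" might produce an
// indirection annotated with the sequence [s (Instance), f (Instance)], while a static access
// such as "C.StaticStruct.f" would start with a SimpleStatic (or SharedStatic) entry followed
// by instance struct fields.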
class FieldSeqNode
{
public:
enum class FieldKind : uintptr_t
{
Instance = 0, // An instance field, object or struct.
SimpleStatic = 1, // Simple static field - the handle represents a unique location.
SharedStatic = 2, // Static field on a shared generic type: "Class<__Canon>.StaticField".
};
private:
static const uintptr_t FIELD_KIND_MASK = 0b11;
static_assert_no_msg(sizeof(CORINFO_FIELD_HANDLE) == sizeof(uintptr_t));
uintptr_t m_fieldHandleAndKind;
FieldSeqNode* m_next;
public:
FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, FieldKind fieldKind);
FieldKind GetKind() const
{
return static_cast<FieldKind>(m_fieldHandleAndKind & FIELD_KIND_MASK);
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(GetFieldHandleValue() != NO_FIELD_HANDLE);
return GetFieldHandleValue();
}
CORINFO_FIELD_HANDLE GetFieldHandleValue() const
{
return CORINFO_FIELD_HANDLE(m_fieldHandleAndKind & ~FIELD_KIND_MASK);
}
FieldSeqNode* GetNext() const
{
return m_next;
}
bool IsStaticField() const
{
return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic);
}
bool IsSharedStaticField() const
{
return GetKind() == FieldKind::SharedStatic;
}
FieldSeqNode* GetTail()
{
FieldSeqNode* tail = this;
while (tail->m_next != nullptr)
{
tail = tail->m_next;
}
return tail;
}
// Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable.
// Note that there is a one-to-one relationship between the field handle and the field kind, so
// we do not need to mask away the latter for comparison purposes.
static int GetHashCode(FieldSeqNode fsn)
{
return static_cast<int>(fsn.m_fieldHandleAndKind) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
}
static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
{
return fsn1.m_fieldHandleAndKind == fsn2.m_fieldHandleAndKind && fsn1.m_next == fsn2.m_next;
}
};
// This class canonicalizes field sequences.
class FieldSeqStore
{
typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
public:
FieldSeqStore(CompAllocator alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd,
FieldSeqNode::FieldKind fieldKind = FieldSeqNode::FieldKind::Instance);
// This is a special distinguished FieldSeqNode indicating that a constant does *not*
// represent a valid field sequence. This is "infectious", in the sense that appending it
// (on either side) to any field sequence yields the "NotAField()" sequence.
static FieldSeqNode* NotAField()
{
return &s_notAField;
}
// Returns the (canonical in the store) field sequence representing the concatenation of
// the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
// they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
// are the "NotAField" value, so is the result.
FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
};
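// Usage sketch (illustrative): given canonical sequences a = [s] and b = [f], Append(a, b)
// yields the canonical [s, f], while Append(a, NotAField()) and Append(NotAField(), b) both
// yield NotAField(), reflecting the "infectious" behavior described above.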
class GenTreeUseEdgeIterator;
class GenTreeOperandIterator;
struct Statement;
/*****************************************************************************/
// Forward declarations of the subtypes
#define GTSTRUCT_0(fn, en) struct GenTree##fn;
#define GTSTRUCT_1(fn, en) struct GenTree##fn;
#define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
#define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
#define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
#define GTSTRUCT_N(fn, ...) struct GenTree##fn;
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
/*****************************************************************************/
// Don't format the GenTreeFlags declaration
// clang-format off
//------------------------------------------------------------------------
// GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
//
enum GenTreeFlags : unsigned int
{
GTF_EMPTY = 0,
//---------------------------------------------------------------------
// The first set of flags can be used with a large set of nodes, and
// thus they must all have distinct values. That is, one can test any
// expression node for one of these flags.
//---------------------------------------------------------------------
GTF_ASG = 0x00000001, // sub-expression contains an assignment
GTF_CALL = 0x00000002, // sub-expression contains a func. call
GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
// If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
// otherwise the C# (run csc /o-) code:
// var v = side_eff_operation
// with no use of `v` will drop your tree on the floor.
GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
GTF_SPILLED = 0x00000080, // the value has been spilled
GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
GTF_USE_FLAGS = 0x00000400, // Indicates that this node uses the flags bits.
GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
GTF_NODE_MASK = GTF_COLON_COND,
GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
// With operators: the specified node is an unsigned operator
GTF_LATE_ARG = 0x00010000, // The specified node is evaluated to a temp in the arg list, and this temp is added to gtCallLateArgs.
GTF_SPILL = 0x00020000, // Needs to be spilled here
// The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
// that we are calling in the context of performing a CSE, thus we
// should allow the run-once side effects of running a class constructor.
//
// The only requirement of this flag is that it not overlap any of the
// side-effect flags. The actual bit used is otherwise arbitrary.
GTF_IS_IN_CSE = GTF_BOOLEAN,
GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
// register assigned to this node, so the code generator does not have to generate
// code to produce the value. It is currently used only on constant nodes.
// It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
// it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
//---------------------------------------------------------------------
// The following flags can be used only with a small set of nodes, and
// thus their values need not be distinct (other than within the set
// that goes with a particular node/nodes, of course). That is, one can
// only test for one of these flags if the 'gtOper' value is tested as
// well to make sure it's the right operator for the particular flag.
//---------------------------------------------------------------------
// NB: GTF_VAR_* and GTF_REG_* share the same namespace of flags.
// These flags are also used by GT_LCL_FLD, and the last-use (DEATH) flags are also used by GenTreeCopyOrReload.
GTF_VAR_DEF = 0x80000000, // GT_LCL_VAR -- this is a definition
GTF_VAR_USEASG = 0x40000000, // GT_LCL_VAR -- this is a partial definition, a use of the previous definition is implied
// A partial definition usually occurs when a struct field is assigned to (s.f = ...) or
// when a scalar typed variable is assigned to via a narrow store (*((byte*)&i) = ...).
// Last-use bits.
// Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
// so we don't need the equivalent of GTF_VAR_USEASG.
GTF_VAR_MULTIREG_DEATH0 = 0x04000000, // GT_LCL_VAR -- The last-use bit for a lclVar (the first register if it is multireg).
GTF_VAR_DEATH = GTF_VAR_MULTIREG_DEATH0,
GTF_VAR_MULTIREG_DEATH1 = 0x08000000, // GT_LCL_VAR -- The last-use bit for the second register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH2 = 0x10000000, // GT_LCL_VAR -- The last-use bit for the third register of a multireg lclVar.
GTF_VAR_MULTIREG_DEATH3 = 0x20000000, // GT_LCL_VAR -- The last-use bit for the fourth register of a multireg lclVar.
GTF_VAR_DEATH_MASK = GTF_VAR_MULTIREG_DEATH0 | GTF_VAR_MULTIREG_DEATH1 | GTF_VAR_MULTIREG_DEATH2 | GTF_VAR_MULTIREG_DEATH3,
// This is the amount we have to shift, plus the regIndex, to get the last use bit we want.
#define MULTIREG_LAST_USE_SHIFT 26
GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
// to/from a multireg source or destination (e.g. a call arg or return, or an op
// that returns its result in multiple registers such as a long multiply).
GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
    GTF_VAR_CAST            = 0x01000000, // GT_LCL_VAR -- has been explicitly cast (variable node may not be type of local)
    GTF_VAR_ITERATOR        = 0x00800000, // GT_LCL_VAR -- this is an iterator reference in the loop condition
GTF_VAR_CLONED = 0x00400000, // GT_LCL_VAR -- this node has been cloned or is a clone
GTF_VAR_CONTEXT = 0x00200000, // GT_LCL_VAR -- this node is part of a runtime lookup
GTF_VAR_FOLDED_IND = 0x00100000, // GT_LCL_VAR -- this node was folded from *(typ*)&lclVar expression tree in fgMorphSmpOp()
// where 'typ' is a small type and 'lclVar' corresponds to a normalized-on-store local variable.
// This flag identifies such nodes in order to make sure that fgDoNormalizeOnStore() is called
// on their parents in post-order morph.
// Relevant for inlining optimizations (see fgInlinePrependStatements)
// For additional flags for GT_CALL node see GTF_CALL_M_*
GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
GTF_FLD_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_FLD_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- field access requires preceding class/static init helper
GTF_INX_RNGCHK = 0x80000000, // GT_INDEX/GT_INDEX_ADDR -- the array reference should be range-checked.
GTF_INX_STRING_LAYOUT = 0x40000000, // GT_INDEX -- this uses the special string array layout
GTF_INX_NOFAULT = 0x20000000, // GT_INDEX -- the INDEX does not throw an exception (morph to GTF_IND_NONFAULTING)
GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
    GTF_IND_VOLATILE        = 0x40000000, // GT_IND   -- the load or store must use volatile semantics (this is a nop on X86)
GTF_IND_NONFAULTING = 0x20000000, // Operations for which OperIsIndir() is true -- An indir that cannot fault.
// Same as GTF_ARRLEN_NONFAULTING.
GTF_IND_TGTANYWHERE = 0x10000000, // GT_IND -- the target could be anywhere
GTF_IND_TLS_REF = 0x08000000, // GT_IND -- the target is accessed via TLS
GTF_IND_ASG_LHS = 0x04000000, // GT_IND -- this GT_IND node is (the effective val) of the LHS of an
// assignment; don't evaluate it independently.
GTF_IND_REQ_ADDR_IN_REG = GTF_IND_ASG_LHS, // GT_IND -- requires its addr operand to be evaluated
// into a register. This flag is useful in cases where it
// is required to generate register indirect addressing mode.
// One such case is virtual stub calls on xarch. This is only
// valid in the backend, where GTF_IND_ASG_LHS is not necessary
// (all such indirections will be lowered to GT_STOREIND).
GTF_IND_UNALIGNED = 0x02000000, // GT_IND -- the load or store is unaligned (we assume worst case
// alignment of 1 byte)
GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
GTF_IND_FLAGS = GTF_IND_VOLATILE | GTF_IND_TGTANYWHERE | GTF_IND_NONFAULTING | GTF_IND_TLS_REF |
GTF_IND_UNALIGNED | GTF_IND_INVARIANT | GTF_IND_NONNULL | GTF_IND_TGT_NOT_HEAP,
GTF_CLS_VAR_VOLATILE = 0x40000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_IND_VOLATILE
GTF_CLS_VAR_INITCLASS = 0x20000000, // GT_FIELD/GT_CLS_VAR -- same as GTF_FLD_INITCLASS
GTF_CLS_VAR_ASG_LHS = 0x04000000, // GT_CLS_VAR -- this GT_CLS_VAR node is (the effective val) of the LHS
// of an assignment; don't evaluate it independently.
GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
// addressing mode
GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
// with explicit "loop test" in the header block.
GTF_RELOP_SJUMP_OPT = 0x04000000, // GT_<relop> -- Swap signed jl/jge with js/jns during emitter, reuses flags
// from previous instruction.
GTF_JCMP_EQ = 0x80000000, // GTF_JCMP_EQ -- Branch on equal rather than not equal
GTF_JCMP_TST = 0x40000000, // GTF_JCMP_TST -- Use bit test instruction rather than compare against zero instruction
GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
// castclass or instanceof?
GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
GTF_ARR_ADDR_NONNULL = 0x80000000, // GT_ARR_ADDR -- this array's address is not null
GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a string handle
GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
GTF_ICON_FIELD_SEQ = 0x11000000, // <--------> -- constant is a FieldSeqNode* (used only as VNHandle)
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// the address of the static field itself, and in other cases
// there's an extra layer of indirection and it is the address
// of the cell that the runtime will fill in with the address
// of the static field; in both of those cases, the constant
// is what gets flagged.
    GTF_BLK_VOLATILE = GTF_IND_VOLATILE, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK -- is a volatile block operation
    GTF_BLK_UNALIGNED = GTF_IND_UNALIGNED, // GT_ASG, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK -- is an unaligned block operation
GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
// Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proven this check is always in-bounds
GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
GTF_SIMDASHW_OP = 0x80000000, // GT_HWINTRINSIC -- Indicates that the structHandle should be gotten from gtGetStructHandleForSIMD
// rather than from gtGetStructHandleForHWSIMD.
// Flag used by assertion prop to indicate that a type is a TYP_LONG
#ifdef TARGET_64BIT
GTF_ASSERTION_PROP_LONG = 0x00000001,
#endif // TARGET_64BIT
};
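// Illustrative check of the last-use encoding above: GTF_VAR_MULTIREG_DEATH0 == (1 << MULTIREG_LAST_USE_SHIFT)
// (0x04000000 == 1 << 26), and the remaining death bits follow at shifts 27..29; GenTree::GetLastUseBit
// relies on exactly this layout.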
inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
{
return (GenTreeFlags)(~(unsigned int)a);
}
inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
{
return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
{
return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
}
// Can any side-effects be observed externally, say by a caller method?
// For assignments, only assignments to global memory can be observed
// externally, whereas simple assignments to local variables can not.
//
// Be careful when using this inside a "try" protected region as the
// order of assignments to local variables would need to be preserved
// wrt side effects if the variables are alive on entry to the
// "catch/finally" region. In such cases, even assignments to locals
// will have to be restricted.
#define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
(((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
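// Examples (illustrative): a store to a local sets only GTF_ASG, so the macro yields false
// (not externally observable); an assignment through a global/heap location sets both GTF_ASG
// and GTF_GLOB_REF and yields true, as does any tree carrying GTF_CALL or GTF_EXCEPT.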
#if defined(DEBUG)
//------------------------------------------------------------------------
// GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
//
enum GenTreeDebugFlags : unsigned int
{
GTF_DEBUG_NONE = 0x00000000, // No debug flags.
GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
GTF_DEBUG_NODE_SMALL = 0x00000002,
GTF_DEBUG_NODE_LARGE = 0x00000004,
GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
};
inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
{
return (GenTreeDebugFlags)(~(unsigned int)a);
}
inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
{
return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
{
return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
}
#endif // defined(DEBUG)
// clang-format on
#ifndef HOST_64BIT
#include <pshpack4.h>
#endif
struct GenTree
{
// We use GT_STRUCT_0 only for the category of simple ops.
#define GTSTRUCT_0(fn, en) \
GenTree##fn* As##fn() \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIsSimple()); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_N(fn, ...) \
GenTree##fn* As##fn() \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<GenTree##fn*>(this); \
} \
const GenTree##fn* As##fn() const \
{ \
assert(OperIs(__VA_ARGS__)); \
return reinterpret_cast<const GenTree##fn*>(this); \
} \
GenTree##fn& As##fn##Ref() \
{ \
return *As##fn(); \
}
#define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
#define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
#define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
#define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
#define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
#define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
#include "gtstructs.h"
genTreeOps gtOper; // enum subtype BYTE
var_types gtType; // enum subtype BYTE
genTreeOps OperGet() const
{
return gtOper;
}
var_types TypeGet() const
{
return gtType;
}
#ifdef DEBUG
genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
#endif
#define NO_CSE (0)
#define IS_CSE_INDEX(x) ((x) != 0)
#define IS_CSE_USE(x) ((x) > 0)
#define IS_CSE_DEF(x) ((x) < 0)
#define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
#define TO_CSE_DEF(x) (-(x))
signed char gtCSEnum; // 0 or the CSE index (negated if def)
// valid only for CSE expressions
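    // Illustrative encoding: gtCSEnum == 3 marks this node as a use of CSE candidate #3,
    // gtCSEnum == -3 marks the def of the same candidate, and 0 (NO_CSE) means the node does
    // not participate; GET_CSE_INDEX/TO_CSE_DEF above implement exactly this mapping.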
unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
AssertionInfo gtAssertionInfo;
bool GeneratesAssertion() const
{
return gtAssertionInfo.HasAssertion();
}
void ClearAssertion()
{
gtAssertionInfo.Clear();
}
AssertionInfo GetAssertionInfo() const
{
return gtAssertionInfo;
}
void SetAssertionInfo(AssertionInfo info)
{
gtAssertionInfo = info;
}
//
// Cost metrics on the node. Don't allow direct access to the variable for setting.
//
public:
#ifdef DEBUG
// You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
// Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
// Obviously, this information does need to be initialized when a node is created.
// This is public so the dumpers can see it.
bool gtCostsInitialized;
#endif // DEBUG
#define MAX_COST UCHAR_MAX
#define IND_COST_EX 3 // execution cost for an indirection
unsigned char GetCostEx() const
{
assert(gtCostsInitialized);
return _gtCostEx;
}
unsigned char GetCostSz() const
{
assert(gtCostsInitialized);
return _gtCostSz;
}
// Set the costs. They are always both set at the same time.
// Don't use the "put" property: force calling this function, to make it more obvious in the few places
// that set the values.
// Note that costs are only set in gtSetEvalOrder() and its callees.
void SetCosts(unsigned costEx, unsigned costSz)
{
assert(costEx != (unsigned)-1); // looks bogus
assert(costSz != (unsigned)-1); // looks bogus
INDEBUG(gtCostsInitialized = true;)
_gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
_gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
}
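    // For example, SetCosts(300, 5) clamps the execution cost to MAX_COST (255) and stores 5
    // for the size cost; costs saturate rather than wrap.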
    // Optimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is
// happening.
void CopyCosts(const GenTree* const tree)
{
// If the 'tree' costs aren't initialized, we'll hit an assert below.
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->GetCostEx();
_gtCostSz = tree->GetCostSz();
}
// Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
// This is because the importer, for example, clones nodes, before these costs have been initialized.
// Note that we directly access the 'tree' costs, not going through the accessor functions (either
// directly or through the properties).
void CopyRawCosts(const GenTree* const tree)
{
INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
_gtCostEx = tree->_gtCostEx;
_gtCostSz = tree->_gtCostSz;
}
private:
unsigned char _gtCostEx; // estimate of expression execution cost
unsigned char _gtCostSz; // estimate of expression code size cost
//
// Register or register pair number of the node.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
public:
enum genRegTag
{
GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
GT_REGTAG_REG // _gtRegNum has been assigned
};
genRegTag GetRegTag() const
{
assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
return gtRegTag;
}
private:
genRegTag gtRegTag; // What is in _gtRegNum?
#endif // DEBUG
private:
// This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
regNumberSmall _gtRegNum;
// Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
friend struct GenTreeMultiOp;
uint8_t m_operandCount;
public:
// The register number is stored in a small format (8 bits), but the getters return and the setters take
// a full-size (unsigned) format, to localize the casts here.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool canBeContained() const;
#endif
// for codegen purposes, is this node a subnode of its parent
bool isContained() const;
bool isContainedIndir() const;
bool isIndirAddrMode();
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
bool isIndir() const;
bool isContainedIntOrIImmed() const
{
return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
}
bool isContainedFltOrDblImmed() const
{
return isContained() && (OperGet() == GT_CNS_DBL);
}
bool isLclField() const
{
return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
}
bool isUsedFromSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
bool isMemoryOp() const
{
return isIndir() || isLclField();
}
bool isUsedFromMemory() const
{
return ((isContained() && (isMemoryOp() || (OperGet() == GT_LCL_VAR) || (OperGet() == GT_CNS_DBL))) ||
isUsedFromSpillTemp());
}
bool isLclVarUsedFromMemory() const
{
return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
}
bool isLclFldUsedFromMemory() const
{
return isLclField() && (isContained() || isUsedFromSpillTemp());
}
bool isUsedFromReg() const
{
return !isContained() && !isUsedFromSpillTemp();
}
regNumber GetRegNum() const
{
assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
// and fix everyplace that reads undefined
// values
regNumber reg = (regNumber)_gtRegNum;
assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
// undefined values
(reg >= REG_FIRST && reg <= REG_COUNT));
return reg;
}
void SetRegNum(regNumber reg)
{
assert(reg >= REG_FIRST && reg <= REG_COUNT);
_gtRegNum = (regNumberSmall)reg;
INDEBUG(gtRegTag = GT_REGTAG_REG;)
assert(_gtRegNum == reg);
}
void ClearRegNum()
{
_gtRegNum = REG_NA;
INDEBUG(gtRegTag = GT_REGTAG_NONE;)
}
// Copy the _gtRegNum/gtRegTag fields
void CopyReg(GenTree* from);
bool gtHasReg(Compiler* comp) const;
int GetRegisterDstCount(Compiler* compiler) const;
regMaskTP gtGetRegMask() const;
regMaskTP gtGetContainedRegMask();
GenTreeFlags gtFlags;
#if defined(DEBUG)
GenTreeDebugFlags gtDebugFlags;
#endif // defined(DEBUG)
ValueNumPair gtVNPair;
regMaskSmall gtRsvdRegs; // set of fixed trashed registers
unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
void SetVNsFromNode(GenTree* tree)
{
gtVNPair = tree->gtVNPair;
}
ValueNum GetVN(ValueNumKind vnk) const
{
if (vnk == VNK_Liberal)
{
return gtVNPair.GetLiberal();
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.GetConservative();
}
}
void SetVN(ValueNumKind vnk, ValueNum vn)
{
if (vnk == VNK_Liberal)
{
return gtVNPair.SetLiberal(vn);
}
else
{
assert(vnk == VNK_Conservative);
return gtVNPair.SetConservative(vn);
}
}
void SetVNs(ValueNumPair vnp)
{
gtVNPair = vnp;
}
void ClearVN()
{
gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
}
GenTree* gtNext;
GenTree* gtPrev;
#ifdef DEBUG
unsigned gtTreeID;
unsigned gtSeqNum; // liveness traversal order within the current statement
int gtUseNum; // use-ordered traversal within the function
#endif
static const unsigned char gtOperKindTable[];
static unsigned OperKind(unsigned gtOper)
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
unsigned OperKind() const
{
assert(gtOper < GT_COUNT);
return gtOperKindTable[gtOper];
}
static bool IsExOp(unsigned opKind)
{
return (opKind & GTK_EXOP) != 0;
}
bool IsValue() const
{
if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
{
return false;
}
if (gtType == TYP_VOID)
{
// These are the only operators which can produce either VOID or non-VOID results.
assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsSIMD() ||
OperIsHWIntrinsic());
return false;
}
return true;
}
// LIR flags
// These helper methods, along with the flag values they manipulate, are defined in lir.h
//
// UnusedValue indicates that, although this node produces a value, it is unused.
inline void SetUnusedValue();
inline void ClearUnusedValue();
inline bool IsUnusedValue() const;
// RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
inline bool IsRegOptional() const;
inline void SetRegOptional();
inline void ClearRegOptional();
#ifdef DEBUG
void dumpLIRFlags();
#endif
bool TypeIs(var_types type) const
{
return gtType == type;
}
template <typename... T>
bool TypeIs(var_types type, T... rest) const
{
return TypeIs(type) || TypeIs(rest...);
}
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
{
return operCompare == oper;
}
template <typename... T>
static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
{
return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
}
bool OperIs(genTreeOps oper) const
{
return OperGet() == oper;
}
template <typename... T>
bool OperIs(genTreeOps oper, T... rest) const
{
return OperIs(oper) || OperIs(rest...);
}
static bool OperIsConst(genTreeOps gtOper)
{
static_assert_no_msg(AreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR));
return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_STR);
}
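    // The static_assert above guarantees that the constant opers (GT_CNS_INT .. GT_CNS_STR)
    // stay contiguous in genTreeOps, so a single range check classifies all of them; e.g.
    // GT_CNS_DBL is recognized as a constant without being named explicitly.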
bool OperIsConst() const
{
return OperIsConst(gtOper);
}
static bool OperIsLeaf(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
bool OperIsLeaf() const
{
return (OperKind(gtOper) & GTK_LEAF) != 0;
}
static bool OperIsLocal(genTreeOps gtOper)
{
static_assert_no_msg(AreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
}
static bool OperIsLocalAddr(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR_ADDR || gtOper == GT_LCL_FLD_ADDR);
}
static bool OperIsLocalField(genTreeOps gtOper)
{
return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_FLD_ADDR || gtOper == GT_STORE_LCL_FLD);
}
inline bool OperIsLocalField() const
{
return OperIsLocalField(gtOper);
}
static bool OperIsScalarLocal(genTreeOps gtOper)
{
return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
}
static bool OperIsNonPhiLocal(genTreeOps gtOper)
{
return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
}
static bool OperIsLocalRead(genTreeOps gtOper)
{
return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
}
static bool OperIsLocalStore(genTreeOps gtOper)
{
return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
}
static bool OperIsAddrMode(genTreeOps gtOper)
{
return (gtOper == GT_LEA);
}
static bool OperIsInitVal(genTreeOps gtOper)
{
return (gtOper == GT_INIT_VAL);
}
bool OperIsInitVal() const
{
return OperIsInitVal(OperGet());
}
bool IsConstInitVal() const
{
return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
}
bool OperIsBlkOp();
bool OperIsCopyBlkOp();
bool OperIsInitBlkOp();
static bool OperIsBlk(genTreeOps gtOper)
{
return (gtOper == GT_BLK) || (gtOper == GT_OBJ) || OperIsStoreBlk(gtOper);
}
bool OperIsBlk() const
{
return OperIsBlk(OperGet());
}
static bool OperIsStoreBlk(genTreeOps gtOper)
{
return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_OBJ, GT_STORE_DYN_BLK);
}
bool OperIsStoreBlk() const
{
return OperIsStoreBlk(OperGet());
}
bool OperIsPutArgSplit() const
{
#if FEATURE_ARG_SPLIT
assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
return gtOper == GT_PUTARG_SPLIT;
#else // !FEATURE_ARG_SPLIT
return false;
#endif
}
bool OperIsPutArgStk() const
{
return gtOper == GT_PUTARG_STK;
}
bool OperIsPutArgStkOrSplit() const
{
return OperIsPutArgStk() || OperIsPutArgSplit();
}
bool OperIsPutArgReg() const
{
return gtOper == GT_PUTARG_REG;
}
bool OperIsPutArg() const
{
return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
}
bool OperIsFieldList() const
{
return OperIs(GT_FIELD_LIST);
}
bool OperIsMultiRegOp() const
{
#if !defined(TARGET_64BIT)
if (OperIs(GT_MUL_LONG))
{
return true;
}
#if defined(TARGET_ARM)
if (OperIs(GT_PUTARG_REG, GT_BITCAST))
{
return true;
}
#endif // TARGET_ARM
#endif // TARGET_64BIT
return false;
}
bool OperIsAddrMode() const
{
return OperIsAddrMode(OperGet());
}
bool OperIsLocal() const
{
return OperIsLocal(OperGet());
}
bool OperIsLocalAddr() const
{
return OperIsLocalAddr(OperGet());
}
bool OperIsScalarLocal() const
{
return OperIsScalarLocal(OperGet());
}
bool OperIsNonPhiLocal() const
{
return OperIsNonPhiLocal(OperGet());
}
bool OperIsLocalStore() const
{
return OperIsLocalStore(OperGet());
}
bool OperIsLocalRead() const
{
return OperIsLocalRead(OperGet());
}
static bool OperIsCompare(genTreeOps gtOper)
{
static_assert_no_msg(AreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
}
bool OperIsCompare() const
{
return OperIsCompare(OperGet());
}
static bool OperIsShift(genTreeOps gtOper)
{
return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
}
bool OperIsShift() const
{
return OperIsShift(OperGet());
}
static bool OperIsShiftLong(genTreeOps gtOper)
{
#ifdef TARGET_64BIT
return false;
#else
return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
#endif
}
bool OperIsShiftLong() const
{
return OperIsShiftLong(OperGet());
}
static bool OperIsRotate(genTreeOps gtOper)
{
return (gtOper == GT_ROL) || (gtOper == GT_ROR);
}
bool OperIsRotate() const
{
return OperIsRotate(OperGet());
}
static bool OperIsShiftOrRotate(genTreeOps gtOper)
{
return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
}
bool OperIsShiftOrRotate() const
{
return OperIsShiftOrRotate(OperGet());
}
static bool OperIsMul(genTreeOps gtOper)
{
return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
|| (gtOper == GT_MUL_LONG)
#endif
;
}
bool OperIsMul() const
{
return OperIsMul(gtOper);
}
bool OperIsArithmetic() const
{
genTreeOps op = OperGet();
return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
|| op == GT_UDIV || op == GT_UMOD
|| op == GT_OR || op == GT_XOR || op == GT_AND
|| OperIsShiftOrRotate(op);
}
#ifdef TARGET_XARCH
static bool OperIsRMWMemOp(genTreeOps gtOper)
{
// Return if binary op is one of the supported operations for RMW of memory.
return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
}
bool OperIsRMWMemOp() const
{
// Return if binary op is one of the supported operations for RMW of memory.
return OperIsRMWMemOp(gtOper);
}
#endif // TARGET_XARCH
static bool OperIsUnary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_UNOP) != 0;
}
bool OperIsUnary() const
{
return OperIsUnary(gtOper);
}
static bool OperIsBinary(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_BINOP) != 0;
}
bool OperIsBinary() const
{
return OperIsBinary(gtOper);
}
static bool OperIsSimple(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_SMPOP) != 0;
}
static bool OperIsSpecial(genTreeOps gtOper)
{
return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
}
bool OperIsSimple() const
{
return OperIsSimple(gtOper);
}
#ifdef FEATURE_SIMD
bool isCommutativeSIMDIntrinsic();
#else // !FEATURE_SIMD
bool isCommutativeSIMDIntrinsic()
{
return false;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
bool isCommutativeHWIntrinsic() const;
bool isContainableHWIntrinsic() const;
bool isRMWHWIntrinsic(Compiler* comp);
#else
bool isCommutativeHWIntrinsic() const
{
return false;
}
bool isContainableHWIntrinsic() const
{
return false;
}
bool isRMWHWIntrinsic(Compiler* comp)
{
return false;
}
#endif // FEATURE_HW_INTRINSICS
static bool OperIsCommutative(genTreeOps gtOper)
{
return (OperKind(gtOper) & GTK_COMMUTE) != 0;
}
bool OperIsCommutative()
{
return OperIsCommutative(gtOper) || (OperIsSIMD(gtOper) && isCommutativeSIMDIntrinsic()) ||
(OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
}
static bool OperMayOverflow(genTreeOps gtOper)
{
return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
#if !defined(TARGET_64BIT)
|| (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
#endif
);
}
bool OperMayOverflow() const
{
return OperMayOverflow(gtOper);
}
// This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
// indirection is expected (i.e. either a load to or a store from a single register).
// OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
static bool OperIsIndir(genTreeOps gtOper)
{
return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || OperIsBlk(gtOper);
}
static bool OperIsIndirOrArrLength(genTreeOps gtOper)
{
return OperIsIndir(gtOper) || (gtOper == GT_ARR_LENGTH);
}
bool OperIsIndir() const
{
return OperIsIndir(gtOper);
}
bool OperIsIndirOrArrLength() const
{
return OperIsIndirOrArrLength(gtOper);
}
bool OperIsImplicitIndir() const;
static bool OperIsAtomicOp(genTreeOps gtOper)
{
switch (gtOper)
{
case GT_XADD:
case GT_XORR:
case GT_XAND:
case GT_XCHG:
case GT_LOCKADD:
case GT_CMPXCHG:
return true;
default:
return false;
}
}
bool OperIsAtomicOp() const
{
return OperIsAtomicOp(gtOper);
}
bool OperIsStore() const
{
return OperIsStore(gtOper);
}
static bool OperIsStore(genTreeOps gtOper)
{
return (gtOper == GT_STOREIND || gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD ||
OperIsStoreBlk(gtOper) || OperIsAtomicOp(gtOper));
}
static bool OperIsMultiOp(genTreeOps gtOper)
{
return OperIsSIMD(gtOper) || OperIsHWIntrinsic(gtOper);
}
bool OperIsMultiOp() const
{
return OperIsMultiOp(OperGet());
}
bool OperIsSsaDef() const
{
return OperIs(GT_ASG, GT_CALL);
}
// This is here for cleaner FEATURE_SIMD #ifdefs.
static bool OperIsSIMD(genTreeOps gtOper)
{
#ifdef FEATURE_SIMD
return gtOper == GT_SIMD;
#else // !FEATURE_SIMD
return false;
#endif // !FEATURE_SIMD
}
bool OperIsSIMD() const
{
return OperIsSIMD(gtOper);
}
static bool OperIsHWIntrinsic(genTreeOps gtOper)
{
#ifdef FEATURE_HW_INTRINSICS
return gtOper == GT_HWINTRINSIC;
#else
return false;
#endif // FEATURE_HW_INTRINSICS
}
bool OperIsHWIntrinsic() const
{
return OperIsHWIntrinsic(gtOper);
}
bool OperIsSimdOrHWintrinsic() const
{
return OperIsSIMD() || OperIsHWIntrinsic();
}
// This is here for cleaner GT_LONG #ifdefs.
static bool OperIsLong(genTreeOps gtOper)
{
#if defined(TARGET_64BIT)
return false;
#else
return gtOper == GT_LONG;
#endif
}
bool OperIsLong() const
{
return OperIsLong(gtOper);
}
bool OperIsConditionalJump() const
{
return (gtOper == GT_JTRUE) || (gtOper == GT_JCMP) || (gtOper == GT_JCC);
}
#ifdef DEBUG
static const GenTreeDebugOperKind gtDebugOperKindTable[];
static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
{
assert(oper < GT_COUNT);
return gtDebugOperKindTable[oper];
}
GenTreeDebugOperKind DebugOperKind() const
{
return DebugOperKind(OperGet());
}
bool NullOp1Legal() const
{
assert(OperIsSimple());
switch (gtOper)
{
case GT_LEA:
case GT_RETFILT:
case GT_NOP:
case GT_FIELD:
return true;
case GT_RETURN:
return gtType == TYP_VOID;
default:
return false;
}
}
bool NullOp2Legal() const
{
assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
if (!OperIsBinary(gtOper))
{
return true;
}
switch (gtOper)
{
case GT_INTRINSIC:
case GT_LEA:
#if defined(TARGET_ARM)
case GT_PUTARG_REG:
#endif // defined(TARGET_ARM)
return true;
default:
return false;
}
}
bool OperIsLIR() const
{
if (OperIs(GT_NOP))
{
// NOPs may only be present in LIR if they do not produce a value.
return IsNothingNode();
}
return (DebugOperKind() & DBK_NOTLIR) == 0;
}
bool OperSupportsReverseOpEvalOrder(Compiler* comp) const;
static bool RequiresNonNullOp2(genTreeOps oper);
bool IsValidCallArgument();
#endif // DEBUG
inline bool IsFPZero() const;
inline bool IsIntegralConst(ssize_t constVal) const;
inline bool IsIntegralConstVector(ssize_t constVal) const;
inline bool IsSIMDZero() const;
inline bool IsFloatPositiveZero() const;
inline bool IsVectorZero() const;
inline bool IsBoxedValue();
inline GenTree* gtGetOp1() const;
// Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
// a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
// node type you have.
inline GenTree* gtGetOp2() const;
// The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
inline GenTree* gtGetOp2IfPresent() const;
bool TryGetUse(GenTree* operand, GenTree*** pUse);
bool TryGetUse(GenTree* operand)
{
GenTree** unusedUse = nullptr;
return TryGetUse(operand, &unusedUse);
}
private:
bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
public:
GenTree* gtGetParent(GenTree*** pUse);
void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
inline GenTree* gtEffectiveVal(bool commaOnly = false);
inline GenTree* gtCommaAssignVal();
// Tunnel through any GT_RET_EXPRs
GenTree* gtRetExprVal(BasicBlockFlags* pbbFlags = nullptr);
inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
// Returns true if it is a call node returning its value in more than one register
inline bool IsMultiRegCall() const;
// Returns true if it is a struct lclVar node residing in multiple registers.
inline bool IsMultiRegLclVar() const;
// Returns true if it is a node returning its value in more than one register
bool IsMultiRegNode() const;
// Returns the number of registers defined by a multireg node.
unsigned GetMultiRegCount(Compiler* comp) const;
// Returns the regIndex'th register defined by a possibly-multireg node.
regNumber GetRegByIndex(int regIndex) const;
// Returns the type of the regIndex'th register defined by a multi-reg node.
var_types GetRegTypeByIndex(int regIndex) const;
// Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
// Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
private:
GenTreeFlags GetLastUseBit(int regIndex) const;
public:
bool IsLastUse(int regIndex) const;
bool HasLastUse() const;
void SetLastUse(int regIndex);
void ClearLastUse(int regIndex);
// Returns true if it is a GT_COPY or GT_RELOAD node
inline bool IsCopyOrReload() const;
// Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
inline bool IsCopyOrReloadOfMultiRegCall() const;
bool OperRequiresAsgFlag();
bool OperRequiresCallFlag(Compiler* comp);
bool OperMayThrow(Compiler* comp);
unsigned GetScaleIndexMul();
unsigned GetScaleIndexShf();
unsigned GetScaledIndex();
public:
static unsigned char s_gtNodeSizes[];
#if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
static unsigned char s_gtTrueSizes[];
#endif
#if COUNT_AST_OPERS
static unsigned s_gtNodeCounts[];
#endif
static void InitNodeSize();
size_t GetNodeSize() const;
bool IsNodeProperlySized() const;
void ReplaceWith(GenTree* src, Compiler* comp);
static genTreeOps ReverseRelop(genTreeOps relop);
static genTreeOps SwapRelop(genTreeOps relop);
//---------------------------------------------------------------------
static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
//---------------------------------------------------------------------
#if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
static const char* OpName(genTreeOps op);
#endif
#if MEASURE_NODE_SIZE
static const char* OpStructName(genTreeOps op);
#endif
//---------------------------------------------------------------------
bool IsNothingNode() const;
void gtBashToNOP();
// Value number update action enumeration
enum ValueNumberUpdate
{
CLEAR_VN, // Clear value number
PRESERVE_VN // Preserve value number
};
void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN); // set gtOper
void SetOperResetFlags(genTreeOps oper); // set gtOper and reset flags
// set gtOper and only keep GTF_COMMON_MASK flags
void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
void ChangeOperUnchecked(genTreeOps oper);
void SetOperRaw(genTreeOps oper);
void ChangeType(var_types newType)
{
var_types oldType = gtType;
gtType = newType;
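// If this node heads a chain of GT_COMMA nodes, the value flows through the op2 side of each
// comma, so propagate the new type down that chain to keep the tree type-consistent.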
GenTree* node = this;
while (node->gtOper == GT_COMMA)
{
node = node->gtGetOp2();
if (node->gtType != newType)
{
assert(node->gtType == oldType);
node->gtType = newType;
}
}
}
template <typename T>
void BashToConst(T value, var_types type = TYP_UNDEF);
void BashToZeroConst(var_types type);
#if NODEBASH_STATS
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
static void ReportOperBashing(FILE* fp);
#else
static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
{ /* do nothing */
}
static void ReportOperBashing(FILE* fp)
{ /* do nothing */
}
#endif
bool IsLocal() const
{
return OperIsLocal(OperGet());
}
// Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
// is not the same size as the type of the GT_LCL_VAR.
bool IsPartialLclFld(Compiler* comp);
// Returns "true" iff "this" defines a local variable. Requires "comp" to be the
// current compilation. If returns "true", sets "*pLclVarTree" to the
// tree for the local that is defined, and, if "pIsEntire" is non-null, sets "*pIsEntire" to
// true or false, depending on whether the assignment writes to the entirety of the local
// variable, or just a portion of it.
bool DefinesLocal(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire = nullptr);
bool IsLocalAddrExpr(Compiler* comp,
GenTreeLclVarCommon** pLclVarTree,
FieldSeqNode** pFldSeq,
ssize_t* pOffset = nullptr);
// Simpler variant of the above which just returns the local node if this is an expression that
// yields an address into a local
GenTreeLclVarCommon* IsLocalAddrExpr();
// Determine if this tree represents the value of an entire implicit byref parameter,
// and if so return the tree for the parameter.
GenTreeLclVar* IsImplicitByrefParameterValue(Compiler* compiler);
// Determine if this is a LclVarCommon node and return some additional info about it in the
// two out parameters.
bool IsLocalExpr(Compiler* comp, GenTreeLclVarCommon** pLclVarTree, FieldSeqNode** pFldSeq);
// Determine whether this is an assignment tree of the form X = X (op) Y,
// where Y is an arbitrary tree, and X is a lclVar.
unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq);
bool IsArrayAddr(GenTreeArrAddr** pArrAddr);
// Assumes that "this" occurs in a context where it is being dereferenced as the LHS of an assignment-like
// statement (assignment, initblk, or copyblk). The "width" should be the number of bytes copied by the
// operation. Returns "true" if "this" is an address of (or within)
// a local variable; sets "*pLclVarTree" to that local variable instance; and, if "pIsEntire" is non-null,
// sets "*pIsEntire" to true if this assignment writes the full width of the local.
bool DefinesLocalAddr(Compiler* comp, unsigned width, GenTreeLclVarCommon** pLclVarTree, bool* pIsEntire);
// These are only used for dumping.
// The GetRegNum() is only valid in LIR, but the dumping methods are not easily
// modified to check this.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
bool InReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
}
regNumber GetReg() const
{
return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
}
#endif
static bool IsContained(unsigned flags)
{
return ((flags & GTF_CONTAINED) != 0);
}
void SetContained()
{
assert(IsValue());
gtFlags |= GTF_CONTAINED;
assert(isContained());
}
void ClearContained()
{
assert(IsValue());
gtFlags &= ~GTF_CONTAINED;
ClearRegOptional();
}
bool CanCSE() const
{
return ((gtFlags & GTF_DONT_CSE) == 0);
}
void SetDoNotCSE()
{
gtFlags |= GTF_DONT_CSE;
}
void ClearDoNotCSE()
{
gtFlags &= ~GTF_DONT_CSE;
}
bool IsReverseOp() const
{
return (gtFlags & GTF_REVERSE_OPS) ? true : false;
}
void SetReverseOp()
{
gtFlags |= GTF_REVERSE_OPS;
}
void ClearReverseOp()
{
gtFlags &= ~GTF_REVERSE_OPS;
}
bool IsUnsigned() const
{
return ((gtFlags & GTF_UNSIGNED) != 0);
}
void SetUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
gtFlags |= GTF_UNSIGNED;
}
void ClearUnsigned()
{
assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
gtFlags &= ~GTF_UNSIGNED;
}
void SetOverflow()
{
assert(OperMayOverflow());
gtFlags |= GTF_OVERFLOW;
}
void ClearOverflow()
{
assert(OperMayOverflow());
gtFlags &= ~GTF_OVERFLOW;
}
bool Is64RsltMul() const
{
return (gtFlags & GTF_MUL_64RSLT) != 0;
}
void Set64RsltMul()
{
gtFlags |= GTF_MUL_64RSLT;
}
void Clear64RsltMul()
{
gtFlags &= ~GTF_MUL_64RSLT;
}
void SetAllEffectsFlags(GenTree* source)
{
SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
{
SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
}
void SetAllEffectsFlags(GenTreeFlags sourceFlags)
{
assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
gtFlags &= ~GTF_ALL_EFFECT;
gtFlags |= sourceFlags;
}
inline bool IsCnsIntOrI() const;
inline bool IsIntegralConst() const;
inline bool IsIntegralConstPow2() const;
inline bool IsIntegralConstUnsignedPow2() const;
inline bool IsIntegralConstAbsPow2() const;
inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
inline bool IsCnsFltOrDbl() const;
inline bool IsCnsNonZeroFltOrDbl() const;
bool IsIconHandle() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK) ? true : false;
}
bool IsIconHandle(GenTreeFlags handleType) const
{
assert(gtOper == GT_CNS_INT);
assert((handleType & GTF_ICON_HDL_MASK) != 0); // check that handleType is one of the valid GTF_ICON_* values
assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
return (gtFlags & GTF_ICON_HDL_MASK) == handleType;
}
// Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag. For example,
// GTF_ICON_SCOPE_HDL. The tree node must be a const int, but it might not be a handle, in which
// case we'll return zero.
GenTreeFlags GetIconHandleFlag() const
{
assert(gtOper == GT_CNS_INT);
return (gtFlags & GTF_ICON_HDL_MASK);
}
// Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
void ClearIconHandleMask()
{
assert(gtOper == GT_CNS_INT);
gtFlags &= ~GTF_ICON_HDL_MASK;
}
// Return true if the two GT_CNS_INT trees have the same handle flag (GTF_ICON_*_HDL).
static bool SameIconHandleFlag(GenTree* t1, GenTree* t2)
{
return t1->GetIconHandleFlag() == t2->GetIconHandleFlag();
}
bool IsArgPlaceHolderNode() const
{
return OperGet() == GT_ARGPLACE;
}
bool IsCall() const
{
return OperGet() == GT_CALL;
}
inline bool IsHelperCall();
bool gtOverflow() const;
bool gtOverflowEx() const;
bool gtSetFlags() const;
bool gtRequestSetFlags();
#ifdef DEBUG
static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
#endif
// cast operations
inline var_types CastFromType();
inline var_types& CastToType();
// Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
bool IsPhiNode();
// Returns "true" iff "*this" is an assignment (GT_ASG) tree that defines an SSA name (lcl = phi(...));
bool IsPhiDefn();
// Returns "true" iff "*this" is a statement containing an assignment that defines an SSA name (lcl = phi(...));
// Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
// we can't synthesize an assignment operator.
// TODO-Cleanup: Could change this w/o liveset on tree nodes
// (This is also necessary for the VTable trick.)
GenTree()
{
}
// Returns an iterator that will produce the use edge to each operand of this node. Differs
// from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
// and block op nodes.
GenTreeUseEdgeIterator UseEdgesBegin();
GenTreeUseEdgeIterator UseEdgesEnd();
IteratorPair<GenTreeUseEdgeIterator> UseEdges();
// Returns an iterator that will produce each operand of this node, in execution order.
GenTreeOperandIterator OperandsBegin();
GenTreeOperandIterator OperandsEnd();
// Returns a range that will produce the operands of this node in execution order.
IteratorPair<GenTreeOperandIterator> Operands();
enum class VisitResult
{
Abort = false,
Continue = true
};
// Visits each operand of this node. The operand must be either a lambda, function, or functor with the signature
// `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
//
// unsigned operandCount = 0;
// node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult)
// {
// operandCount++;
// return GenTree::VisitResult::Continue;
// });
//
// This function is generally more efficient than the operand iterator and should be preferred over that API for
// hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
// deciding how operands need to be accessed.
//
// Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
// in HIR if for some reason you need to visit operands in the order in which they will execute.
template <typename TVisitor>
void VisitOperands(TVisitor visitor);
private:
template <typename TVisitor>
void VisitBinOpOperands(TVisitor visitor);
public:
bool Precedes(GenTree* other);
bool IsInvariant() const;
bool IsNeverNegative(Compiler* comp) const;
bool IsReuseRegVal() const
{
// This can be extended to non-constant nodes, but not to local or indir nodes.
if (IsInvariant() && ((gtFlags & GTF_REUSE_REG_VAL) != 0))
{
return true;
}
return false;
}
void SetReuseRegVal()
{
assert(IsInvariant());
gtFlags |= GTF_REUSE_REG_VAL;
}
void ResetReuseRegVal()
{
assert(IsInvariant());
gtFlags &= ~GTF_REUSE_REG_VAL;
}
void SetIndirExceptionFlags(Compiler* comp);
#if MEASURE_NODE_SIZE
static void DumpNodeSizes(FILE* fp);
#endif
#ifdef DEBUG
private:
GenTree& operator=(const GenTree& gt)
{
assert(!"Don't copy");
return *this;
}
#endif // DEBUG
#if DEBUGGABLE_GENTREE
// In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
virtual void DummyVirt()
{
}
typedef void* VtablePtr;
VtablePtr GetVtableForOper(genTreeOps oper);
void SetVtableForOper(genTreeOps oper);
static VtablePtr s_vtablesForOpers[GT_COUNT];
static VtablePtr s_vtableForOp;
#endif // DEBUGGABLE_GENTREE
public:
inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
};
// Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
// All PHI_ARG nodes must represent uses of the same local variable and
// the PHI node's type must be the same as the local variable's type.
//
// The PHI node does not represent a definition by itself, it is always
// the RHS of a GT_ASG node. The LHS of the ASG node is always a GT_LCL_VAR
// node, that is a definition for the same local variable referenced by
// all the used PHI_ARG nodes:
//
// ASG(LCL_VAR(lcl7), PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))
//
// PHI nodes are also present in LIR, where GT_STORE_LCL_VAR replaces the
// ASG node.
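//
// For example (an illustrative sketch of the same definition in LIR form):
//
//   STORE_LCL_VAR(lcl7, PHI(PHI_ARG(lcl7), PHI_ARG(lcl7), PHI_ARG(lcl7)))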
//
// The order of the PHI_ARG uses is not currently relevant and it may be
// the same or not as the order of the predecessor blocks.
//
struct GenTreePhi final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node->OperIs(GT_PHI_ARG));
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node->OperIs(GT_PHI_ARG));
return m_node;
}
void SetNode(GenTree* node)
{
assert(node->OperIs(GT_PHI_ARG));
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtUses;
GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
{
}
UseList Uses()
{
return UseList(gtUses);
}
//--------------------------------------------------------------------------
// Equals: Checks if 2 PHI nodes are equal.
//
// Arguments:
// phi1 - The first PHI node
// phi2 - The second PHI node
//
// Return Value:
// true if the 2 PHI nodes have the same type, number of uses, and the
// uses are equal.
//
// Notes:
// The order of uses must be the same for equality, even if the
// order is not usually relevant and is not guaranteed to reflect
// a particular order of the predecessor blocks.
//
static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
{
if (phi1->TypeGet() != phi2->TypeGet())
{
return false;
}
GenTreePhi::UseIterator i1 = phi1->Uses().begin();
GenTreePhi::UseIterator end1 = phi1->Uses().end();
GenTreePhi::UseIterator i2 = phi2->Uses().begin();
GenTreePhi::UseIterator end2 = phi2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
#if DEBUGGABLE_GENTREE
GenTreePhi() : GenTree()
{
}
#endif
};
// Represents a list of fields constituting a struct, when it is passed as an argument.
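//
// For example (an illustrative sketch): a struct with two pointer-sized fields passed in two
// registers could be represented as
//
//   FIELD_LIST
//     LCL_FLD long (offset 0)
//     LCL_FLD long (offset 8)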
//
struct GenTreeFieldList : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
uint16_t m_offset;
var_types m_type;
public:
Use(GenTree* node, unsigned offset, var_types type)
: m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
{
// We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
// only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
// SIMD vector size (32 bytes).
assert(offset <= UINT16_MAX);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
unsigned GetOffset() const
{
return m_offset;
}
var_types GetType() const
{
return m_type;
}
void SetType(var_types type)
{
m_type = type;
}
};
class UseIterator
{
Use* use;
public:
UseIterator(Use* use) : use(use)
{
}
Use& operator*()
{
return *use;
}
Use* operator->()
{
return use;
}
void operator++()
{
use = use->GetNext();
}
bool operator==(const UseIterator& other)
{
return use == other.use;
}
bool operator!=(const UseIterator& other)
{
return use != other.use;
}
};
class UseList
{
Use* m_head;
Use* m_tail;
public:
UseList() : m_head(nullptr), m_tail(nullptr)
{
}
Use* GetHead() const
{
return m_head;
}
UseIterator begin() const
{
return m_head;
}
UseIterator end() const
{
return nullptr;
}
void AddUse(Use* newUse)
{
assert(newUse->GetNext() == nullptr);
if (m_head == nullptr)
{
m_head = newUse;
}
else
{
m_tail->SetNext(newUse);
}
m_tail = newUse;
}
void InsertUse(Use* insertAfter, Use* newUse)
{
assert(newUse->GetNext() == nullptr);
newUse->SetNext(insertAfter->GetNext());
insertAfter->SetNext(newUse);
if (m_tail == insertAfter)
{
m_tail = newUse;
}
}
void Reverse()
{
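// Standard in-place reversal of the singly linked use list: walk the old list and push each
// use onto the front of the new list; the old head becomes the new tail.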
m_tail = m_head;
m_head = nullptr;
for (Use *next, *use = m_tail; use != nullptr; use = next)
{
next = use->GetNext();
use->SetNext(m_head);
m_head = use;
}
}
bool IsSorted() const
{
unsigned offset = 0;
for (GenTreeFieldList::Use& use : *this)
{
if (use.GetOffset() < offset)
{
return false;
}
offset = use.GetOffset();
}
return true;
}
};
private:
UseList m_uses;
public:
GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
{
SetContained();
}
UseList& Uses()
{
return m_uses;
}
// Add a new field use to the end of the use list and update side effect flags.
void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Add a new field use to the end of the use list without updating side effect flags.
void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use and update side effect flags.
void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
// Insert a new field use after the specified use without updating side effect flags.
void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
//--------------------------------------------------------------------------
// Equals: Check if 2 FIELD_LIST nodes are equal.
//
// Arguments:
// list1 - The first FIELD_LIST node
// list2 - The second FIELD_LIST node
//
// Return Value:
// true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
// uses are equal.
//
static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
{
assert(list1->TypeGet() == TYP_STRUCT);
assert(list2->TypeGet() == TYP_STRUCT);
UseIterator i1 = list1->Uses().begin();
UseIterator end1 = list1->Uses().end();
UseIterator i2 = list2->Uses().begin();
UseIterator end2 = list2->Uses().end();
for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
{
if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
(i1->GetType() != i2->GetType()))
{
return false;
}
}
return (i1 == end1) && (i2 == end2);
}
};
//------------------------------------------------------------------------
// GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
// they are used.
//
// Operand iteration is common enough in the back end of the compiler that the implementation of this type has
// traded some simplicity for speed:
// - As much work as is reasonable is done in the constructor rather than during operand iteration
// - Node-specific functionality is handled by a small class of "advance" functions called by operator++
// rather than making operator++ itself handle all nodes
// - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
// binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
//
// Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
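//
// A typical use via the range helper (an illustrative sketch):
//
//   for (GenTree** use : node->UseEdges())
//   {
//       GenTree* operand = *use;
//       ...
//   }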
//
class GenTreeUseEdgeIterator final
{
friend class GenTreeOperandIterator;
friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
enum
{
CALL_INSTANCE = 0,
CALL_ARGS = 1,
CALL_LATE_ARGS = 2,
CALL_CONTROL_EXPR = 3,
CALL_COOKIE = 4,
CALL_ADDRESS = 5,
CALL_TERMINAL = 6,
};
typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
AdvanceFn m_advance;
GenTree* m_node;
GenTree** m_edge;
// Pointer sized state storage, GenTreePhi::Use* or GenTreeCall::Use*
// or the exclusive end/beginning of GenTreeMultiOp's operand array.
void* m_statePtr;
// Integer sized state storage, usually the operand index for non-list based nodes.
int m_state;
GenTreeUseEdgeIterator(GenTree* node);
// Advance functions for special nodes
void AdvanceCmpXchg();
void AdvanceArrElem();
void AdvanceArrOffset();
void AdvanceStoreDynBlk();
void AdvanceFieldList();
void AdvancePhi();
template <bool ReverseOperands>
void AdvanceBinOp();
void SetEntryStateForBinOp();
// The advance function for call nodes
template <int state>
void AdvanceCall();
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
void AdvanceMultiOp();
void AdvanceReversedMultiOp();
void SetEntryStateForMultiOp();
#endif
void Terminate();
public:
GenTreeUseEdgeIterator();
inline GenTree** operator*()
{
assert(m_state != -1);
return m_edge;
}
inline GenTree** operator->()
{
assert(m_state != -1);
return m_edge;
}
inline bool operator==(const GenTreeUseEdgeIterator& other) const
{
if (m_state == -1 || other.m_state == -1)
{
return m_state == other.m_state;
}
return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
(m_state == other.m_state);
}
inline bool operator!=(const GenTreeUseEdgeIterator& other) const
{
return !(operator==(other));
}
GenTreeUseEdgeIterator& operator++();
};
//------------------------------------------------------------------------
// GenTreeOperandIterator: an iterator that will produce each operand of a
// GenTree node in the order in which they are
// used. This uses `GenTreeUseEdgeIterator` under
// the covers.
//
// Note: valid values of this type may be obtained by calling
// `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
class GenTreeOperandIterator final
{
friend GenTreeOperandIterator GenTree::OperandsBegin();
friend GenTreeOperandIterator GenTree::OperandsEnd();
GenTreeUseEdgeIterator m_useEdges;
GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
{
}
public:
GenTreeOperandIterator() : m_useEdges()
{
}
inline GenTree* operator*()
{
return *(*m_useEdges);
}
inline GenTree* operator->()
{
return *(*m_useEdges);
}
inline bool operator==(const GenTreeOperandIterator& other) const
{
return m_useEdges == other.m_useEdges;
}
inline bool operator!=(const GenTreeOperandIterator& other) const
{
return !(operator==(other));
}
inline GenTreeOperandIterator& operator++()
{
++m_useEdges;
return *this;
}
};
/*****************************************************************************/
// In the current design, we never instantiate GenTreeUnOp: it exists only to be
// used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
// argument. We check that this is true dynamically. We could tighten this and get static
// checking, but that would entail accessing the first child of a unary operator via something
// like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
struct GenTreeUnOp : public GenTree
{
GenTree* gtOp1;
protected:
GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
{
}
GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
{
assert(op1 != nullptr || NullOp1Legal());
if (op1 != nullptr)
{ // Propagate effects flags from child.
gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
}
}
#if DEBUGGABLE_GENTREE
GenTreeUnOp() : GenTree(), gtOp1(nullptr)
{
}
#endif
};
struct GenTreeOp : public GenTreeUnOp
{
GenTree* gtOp2;
GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
{
// comparisons are always integral types
assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
// Binary operators, with a few exceptions, require a non-nullptr
// second argument.
assert(op2 != nullptr || NullOp2Legal());
// Unary operators, on the other hand, require a null second argument.
assert(!OperIsUnary(oper) || op2 == nullptr);
// Propagate effects flags from child. (UnOp handled this for first child.)
if (op2 != nullptr)
{
gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
}
}
// A small set of operators are unary but take an optional operand. We use
// this constructor to build those.
GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
{
// Unary operators with optional arguments:
assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
}
// returns true if we will use the division by constant optimization for this node.
bool UsesDivideByConstOptimized(Compiler* comp);
// checks if we will use the division by constant optimization this node
// then sets the flag GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
void CheckDivideByConstOptimized(Compiler* comp);
// True if this node is marked as using the division by constant optimization
bool MarkedDivideByConstOptimized() const
{
return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
}
#if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
bool IsValidLongMul();
#endif
#if !defined(TARGET_64BIT) && defined(DEBUG)
void DebugCheckLongMul();
#endif
#if DEBUGGABLE_GENTREE
GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
{
}
#endif
// True if this relop is marked for a transform during the emitter
// phase, e.g., jge => jns
bool MarkedForSignJumpOpt() const
{
return (gtFlags & GTF_RELOP_SJUMP_OPT) != 0;
}
};
struct GenTreeVal : public GenTree
{
size_t gtVal1;
GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
{
}
#if DEBUGGABLE_GENTREE
GenTreeVal() : GenTree()
{
}
#endif
};
struct GenTreeIntConCommon : public GenTree
{
inline INT64 LngValue() const;
inline void SetLngValue(INT64 val);
inline ssize_t IconValue() const;
inline void SetIconValue(ssize_t val);
inline INT64 IntegralValue() const;
inline void SetIntegralValue(int64_t value);
template <typename T>
inline void SetValueTruncating(T value);
GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
: GenTree(oper, type DEBUGARG(largeNode))
{
}
bool FitsInI8() // IconValue() fits into 8-bit signed storage
{
return FitsInI8(IconValue());
}
static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
{
return (int8_t)val == val;
}
bool FitsInI32() // IconValue() fits into 32-bit signed storage
{
return FitsInI32(IconValue());
}
static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
{
#ifdef TARGET_64BIT
return (int32_t)val == val;
#else
return true;
#endif
}
bool ImmedValNeedsReloc(Compiler* comp);
bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
#ifdef TARGET_XARCH
bool FitsInAddrBase(Compiler* comp);
bool AddrNeedsReloc(Compiler* comp);
#endif
#if DEBUGGABLE_GENTREE
GenTreeIntConCommon() : GenTree()
{
}
#endif
};
// node representing a read from a physical register
struct GenTreePhysReg : public GenTree
{
// physregs need a field beyond GetRegNum() because
// GetRegNum() indicates the destination (and can be changed)
// whereas reg indicates the source
regNumber gtSrcReg;
GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
{
}
#if DEBUGGABLE_GENTREE
GenTreePhysReg() : GenTree()
{
}
#endif
};
/* gtIntCon -- integer constant (GT_CNS_INT) */
struct GenTreeIntCon : public GenTreeIntConCommon
{
/*
* This is the GT_CNS_INT struct definition.
* It's used to hold both int constants and pointer handle constants.
* For the 64-bit targets we will only use GT_CNS_INT, as it is used to represent all the possible sizes.
* For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
* In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
*/
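// For example (an illustrative sketch): the constant 0x100000000 is represented by a
// GT_CNS_LNG node on a 32-bit target, while on a 64-bit target it is a GT_CNS_INT node of
// type TYP_LONG.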
ssize_t gtIconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeLngCon below.
/* The InitializeArray intrinsic needs to go back to the newarray statement
to find the class handle of the array so that we can get its size. However,
in ngen mode, the handle in that statement does not correspond to the compile
time handle (rather it lets you get a handle at run-time). In that case, we also
need to store a compile time handle, which goes in this gtCompileTimeHandle field.
*/
ssize_t gtCompileTimeHandle;
// TODO-Cleanup: It's not clear what characterizes the cases where the field
// above is used. It may be that its uses and those of the "gtFieldSeq" field below
// are mutually exclusive, and they could be put in a union. Or else we should separate
// this type into three subtypes.
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
FieldSeqNode* gtFieldSeq;
#ifdef DEBUG
// If the value represents target address, holds the method handle to that target which is used
// to fetch target method name and display in the disassembled code.
size_t gtTargetHandle = 0;
#endif
GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(FieldSeqStore::NotAField())
{
}
GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
#if DEBUGGABLE_GENTREE
GenTreeIntCon() : GenTreeIntConCommon()
{
}
#endif
};
/* gtLngCon -- long constant (GT_CNS_LNG) */
struct GenTreeLngCon : public GenTreeIntConCommon
{
INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
INT32 LoVal()
{
return (INT32)(gtLconVal & 0xffffffff);
}
INT32 HiVal()
{
return (INT32)(gtLconVal >> 32);
}
GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
{
SetLngValue(val);
}
#if DEBUGGABLE_GENTREE
GenTreeLngCon() : GenTreeIntConCommon()
{
}
#endif
};
inline INT64 GenTreeIntConCommon::LngValue() const
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
return AsLngCon()->gtLconVal;
#else
return IconValue();
#endif
}
inline void GenTreeIntConCommon::SetLngValue(INT64 val)
{
#ifndef TARGET_64BIT
assert(gtOper == GT_CNS_LNG);
AsLngCon()->gtLconVal = val;
#else
// Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
SetIconValue(ssize_t(val));
#endif
}
inline ssize_t GenTreeIntConCommon::IconValue() const
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
return AsIntCon()->gtIconVal;
}
inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
{
assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
AsIntCon()->gtIconVal = val;
}
inline INT64 GenTreeIntConCommon::IntegralValue() const
{
#ifdef TARGET_64BIT
return LngValue();
#else
return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
#endif // TARGET_64BIT
}
inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
{
#ifdef TARGET_64BIT
SetIconValue(value);
#else
if (OperIs(GT_CNS_LNG))
{
SetLngValue(value);
}
else
{
assert(FitsIn<int32_t>(value));
SetIconValue(static_cast<int32_t>(value));
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
//
// The function will truncate the supplied value to a 32 bit signed
// integer if the node's type is not TYP_LONG, otherwise setting it
// as-is. Note that this function intentionally does not check for
// small types (such nodes are created in lowering) for TP reasons.
//
// This function is intended to be used where its truncating behavior is
// desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
// wider integers, which is typical when compiling on 64 bit hosts, as
// most arithmetic is done in ssize_t's aka int64_t's in that case, while
// the node itself can be of a narrower type.
//
// Arguments:
// value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
//
// Notes:
// This function is templated so that it works well with compiler warnings of
// the form "Operation may overflow before being assigned to a wider type", in
// case "value" is of type ssize_t, which is common.
//
template <typename T>
inline void GenTreeIntConCommon::SetValueTruncating(T value)
{
static_assert_no_msg((std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value));
if (TypeIs(TYP_LONG))
{
SetLngValue(value);
}
else
{
SetIconValue(static_cast<int32_t>(value));
}
}
/* gtDblCon -- double constant (GT_CNS_DBL) */
struct GenTreeDblCon : public GenTree
{
double gtDconVal;
bool isBitwiseEqual(GenTreeDblCon* other)
{
unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
return (bits == otherBits);
}
GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type), gtDconVal(val)
{
assert(varTypeIsFloating(type));
}
#if DEBUGGABLE_GENTREE
GenTreeDblCon() : GenTree()
{
}
#endif
};
/* gtStrCon -- string constant (GT_CNS_STR) */
#define EMPTY_STRING_SCON (unsigned)-1
struct GenTreeStrCon : public GenTree
{
unsigned gtSconCPX;
CORINFO_MODULE_HANDLE gtScpHnd;
// Returns true if this GT_CNS_STR was imported for String.Empty field
bool IsStringEmptyField()
{
return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
}
// Because this node can come from an inlined method we need to
// have the scope handle, since it will become a helper call.
GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
: GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
{
}
#if DEBUGGABLE_GENTREE
GenTreeStrCon() : GenTree()
{
}
#endif
};
// Common supertype of LCL_VAR, LCL_FLD, REG_VAR, PHI_ARG
// This inherits from UnOp because lclvar stores are Unops
struct GenTreeLclVarCommon : public GenTreeUnOp
{
private:
unsigned _gtLclNum; // The local number. An index into the Compiler::lvaTable array.
unsigned _gtSsaNum; // The SSA number.
public:
GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
: GenTreeUnOp(oper, type DEBUGARG(largeNode))
{
SetLclNum(lclNum);
}
unsigned GetLclNum() const
{
return _gtLclNum;
}
void SetLclNum(unsigned lclNum)
{
_gtLclNum = lclNum;
_gtSsaNum = SsaConfig::RESERVED_SSA_NUM;
}
uint16_t GetLclOffs() const;
unsigned GetSsaNum() const
{
return _gtSsaNum;
}
void SetSsaNum(unsigned ssaNum)
{
_gtSsaNum = ssaNum;
}
bool HasSsaName()
{
return (GetSsaNum() != SsaConfig::RESERVED_SSA_NUM);
}
#if DEBUGGABLE_GENTREE
GenTreeLclVarCommon() : GenTreeUnOp()
{
}
#endif
};
//------------------------------------------------------------------------
// MultiRegSpillFlags
//
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each register
// are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
//
typedef unsigned char MultiRegSpillFlags;
static const unsigned PACKED_GTF_SPILL = 1;
static const unsigned PACKED_GTF_SPILLED = 2;
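// For example (an illustrative sketch): if only the second result register (idx == 1) of a node
// needs to be spilled, the packed flags hold (PACKED_GTF_SPILL << 2), i.e. 0b0100, and
// GetMultiRegSpillFlagsByIdx(flags, 1) returns GTF_SPILL while idx 0 yields GTF_EMPTY.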
//----------------------------------------------------------------------
// GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
// specified by its index.
//
// Arguments:
// idx - Position or index of the return register
//
// Return Value:
// Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
//
inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
GenTreeFlags spillFlags = GTF_EMPTY;
if (bits & PACKED_GTF_SPILL)
{
spillFlags |= GTF_SPILL;
}
if (bits & PACKED_GTF_SPILLED)
{
spillFlags |= GTF_SPILLED;
}
return spillFlags;
}
//----------------------------------------------------------------------
// SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
//
// Arguments:
// oldFlags - The current value of the MultiRegSpillFlags for a node.
// flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
// Note that these are the flags used on non-multireg nodes,
// and this method adds the appropriate flags to the
// incoming MultiRegSpillFlags and returns it.
// idx - Position or index of the register
//
// Return Value:
// The new value for the node's MultiRegSpillFlags.
//
inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
{
static_assert_no_msg(MAX_RET_REG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
assert(idx < MAX_RET_REG_COUNT);
MultiRegSpillFlags newFlags = oldFlags;
unsigned bits = 0;
if (flagsToSet & GTF_SPILL)
{
bits |= PACKED_GTF_SPILL;
}
if (flagsToSet & GTF_SPILLED)
{
bits |= PACKED_GTF_SPILLED;
}
const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
// Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
return newFlags;
}
// gtLclVar -- load/store/addr of local variable
struct GenTreeLclVar : public GenTreeLclVarCommon
{
private:
regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
public:
INDEBUG(IL_OFFSET gtLclILoffs;) // instr offset of ref (only for JIT dumps)
// Multireg support
bool IsMultiReg() const
{
return ((gtFlags & GTF_VAR_MULTIREG) != 0);
}
void ClearMultiReg()
{
gtFlags &= ~GTF_VAR_MULTIREG;
}
void SetMultiReg()
{
gtFlags |= GTF_VAR_MULTIREG;
ClearOtherRegFlags();
}
regNumber GetRegNumByIdx(int regIndex) const
{
assert(regIndex < MAX_MULTIREG_COUNT);
return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
}
void SetRegNumByIdx(regNumber reg, int regIndex)
{
assert(regIndex < MAX_MULTIREG_COUNT);
if (regIndex == 0)
{
SetRegNum(reg);
}
else
{
gtOtherReg[regIndex - 1] = regNumberSmall(reg);
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
}
unsigned int GetFieldCount(Compiler* compiler) const;
var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
//-------------------------------------------------------------------
// ClearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given LclVar node.
//
// Arguments:
// from - GenTreeLclVar node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeLclVar* from)
{
this->gtSpillFlags = from->gtSpillFlags;
}
GenTreeLclVar(genTreeOps oper,
var_types type,
unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
: GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
{
assert(OperIsLocal(oper) || OperIsLocalAddr(oper));
}
#if DEBUGGABLE_GENTREE
GenTreeLclVar() : GenTreeLclVarCommon()
{
}
#endif
};
// gtLclFld -- load/store/addr of local variable field
struct GenTreeLclFld : public GenTreeLclVarCommon
{
private:
uint16_t m_lclOffs; // offset into the variable to access
FieldSeqNode* m_fieldSeq; // This LclFld node represents some sequences of accesses.
public:
GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs)
: GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs)), m_fieldSeq(nullptr)
{
assert(lclOffs <= UINT16_MAX);
}
uint16_t GetLclOffs() const
{
return m_lclOffs;
}
void SetLclOffs(unsigned lclOffs)
{
assert(lclOffs <= UINT16_MAX);
m_lclOffs = static_cast<uint16_t>(lclOffs);
}
FieldSeqNode* GetFieldSeq() const
{
return m_fieldSeq;
}
void SetFieldSeq(FieldSeqNode* fieldSeq)
{
m_fieldSeq = fieldSeq;
}
#ifdef TARGET_ARM
bool IsOffsetMisaligned() const;
#endif // TARGET_ARM
#if DEBUGGABLE_GENTREE
GenTreeLclFld() : GenTreeLclVarCommon()
{
}
#endif
};
// GenTreeCast - conversion to a different type (GT_CAST).
//
// This node represents all "conv[.ovf].{type}[.un]" IL opcodes.
//
// There are four semantically significant values that determine what it does:
//
// 1) "genActualType(CastOp())" - the type being cast from.
// 2) "gtCastType" - the type being cast to.
// 3) "IsUnsigned" (the "GTF_UNSIGNED" flag) - whether the cast is "unsigned".
// 4) "gtOverflow" (the "GTF_OVERFLOW" flag) - whether the cast is checked.
//
// Different "kinds" of casts use these values differently; not all are always
// meaningful or legal:
//
// 1) For casts from FP types, "IsUnsigned" will always be "false".
// 2) Checked casts use "IsUnsigned" to represent the fact that the type being cast
// from is unsigned. The target type's signedness is similarly significant.
// 3) For unchecked casts, "IsUnsigned" is significant for "int -> long", where
// it decides whether the cast sign- or zero-extends its source, and "integer
// -> FP" cases. For all other unchecked casts, "IsUnsigned" is meaningless.
// 4) For unchecked casts, signedness of the target type is only meaningful if
// the cast is to an FP or small type. In the latter case (and everywhere
// else in IR) it decides whether the value will be sign- or zero-extended.
//
// For additional context on "GT_CAST"'s semantics, see "IntegralRange::ForCast"
// methods and "GenIntCastDesc"'s constructor.
//
struct GenTreeCast : public GenTreeOp
{
GenTree*& CastOp()
{
return gtOp1;
}
var_types gtCastType;
GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
: GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
{
// We do not allow casts from floating point types to be treated as from
// unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
// CastOp's type changes.
assert(!varTypeIsFloating(op) || !fromUnsigned);
gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
}
#if DEBUGGABLE_GENTREE
GenTreeCast() : GenTreeOp()
{
}
#endif
};
// GT_BOX nodes are place markers for boxed values. The "real" tree
// for most purposes is in gtBoxOp.
struct GenTreeBox : public GenTreeUnOp
{
// An expanded helper call to implement the "box" if we don't get
// rid of it any other way. Must be in same position as op1.
GenTree*& BoxOp()
{
return gtOp1;
}
// This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
// type
Statement* gtAsgStmtWhenInlinedBoxValue;
// And this is the statement that copies from the value being boxed to the box payload
Statement* gtCopyStmtWhenInlinedBoxValue;
GenTreeBox(var_types type,
GenTree* boxOp,
Statement* asgStmtWhenInlinedBoxValue,
Statement* copyStmtWhenInlinedBoxValue)
: GenTreeUnOp(GT_BOX, type, boxOp)
, gtAsgStmtWhenInlinedBoxValue(asgStmtWhenInlinedBoxValue)
, gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
{
}
#if DEBUGGABLE_GENTREE
GenTreeBox() : GenTreeUnOp()
{
}
#endif
};
// GenTreeField -- data member ref (GT_FIELD)
struct GenTreeField : public GenTreeUnOp
{
CORINFO_FIELD_HANDLE gtFldHnd;
DWORD gtFldOffset;
bool gtFldMayOverlap;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtFieldLookup;
#endif
GenTreeField(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
: GenTreeUnOp(GT_FIELD, type, obj), gtFldHnd(fldHnd), gtFldOffset(offs), gtFldMayOverlap(false)
{
#ifdef FEATURE_READYTORUN
gtFieldLookup.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeField() : GenTreeUnOp()
{
}
#endif
// The object this field belongs to. Will be "nullptr" for static fields.
// Note that this is an address, i. e. for struct fields it will be ADDR(STRUCT).
GenTree* GetFldObj() const
{
return gtOp1;
}
// True if this field is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_FLD_VOLATILE) != 0;
}
};
// There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
// 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
// at least *allows* the programmer to be obviously correct.
// However, these conventions seem backward.
// TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
struct GenTreeColon : public GenTreeOp
{
GenTree*& ThenNode()
{
return gtOp2;
}
GenTree*& ElseNode()
{
return gtOp1;
}
#if DEBUGGABLE_GENTREE
GenTreeColon() : GenTreeOp()
{
}
#endif
GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
{
}
};
// gtCall -- method call (GT_CALL)
enum class InlineObservation;
//------------------------------------------------------------------------
// GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
//
// clang-format off
enum GenTreeCallFlags : unsigned int
{
GTF_CALL_M_EMPTY = 0,
GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
GTF_CALL_M_VARARGS = 0x00000004, // the call uses varargs ABI
GTF_CALL_M_RETBUFFARG = 0x00000008, // call has a return buffer argument
GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptability and therefore no GC check is required.
GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
// in special cases. Used to optimize fast way out in morphing
GTF_CALL_M_UNMGD_THISCALL = 0x00000080, // "this" pointer (first argument) should be enregistered (only for GTF_CALL_UNMANAGED)
GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
#if FEATURE_TAILCALL_OPT
GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
#endif
GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
// A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For example,
// an IL Stub dynamically generated for a PInvoke declaration is flagged as
// a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
// know when these flags are set.
GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // CoreRT managed calli needs a transformation that checks a
// special bit in the calli address. If it is set, then it is necessary
// to restore the real function address and load the hidden argument
// as the first argument for calli. It is the CoreRT replacement for instantiating
// stubs, because executable code cannot be generated at runtime.
GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
// the comma result is unused.
GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
GTF_CALL_M_EXP_RUNTIME_LOOKUP = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
GTF_CALL_M_LATE_DEVIRT = 0x10000000, // this call has late devirtualization info
};
inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
{
return (GenTreeCallFlags)(~(unsigned int)a);
}
inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
{
return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
}
inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
{
return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
}
// clang-format on
// Return type descriptor of a GT_CALL node.
// x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
// registers. For such calls this struct provides the following info
// on their return type
// - type of value returned in each return register
// - ABI return register numbers in which the value is returned
// - count of return registers in which the value is returned
//
// TODO-ARM: Update this to meet the needs of Arm64 and Arm32
//
// TODO-AllArch: Right now it is used for describing multi-reg returned types.
// Eventually we would want to use it for describing even single-reg
// returned types (e.g. structs returned in single register x64/arm).
// This would allow us not to lie or normalize single struct return
// values in importer/morph.
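//
// For example (an illustrative sketch): on x64 Unix a 16 byte struct with two double fields is
// returned in two floating point registers, so the descriptor would record TYP_DOUBLE in
// m_regType[0] and m_regType[1], and GetReturnRegCount() would return 2.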
struct ReturnTypeDesc
{
private:
var_types m_regType[MAX_RET_REG_COUNT];
bool m_isEnclosingType;
#ifdef DEBUG
bool m_inited;
#endif
public:
ReturnTypeDesc()
{
Reset();
}
// Initialize the Return Type Descriptor for a method that returns a struct type
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
// Initialize the Return Type Descriptor for a method that returns a TYP_LONG
// Only needed for X86 and arm32.
void InitializeLongReturnType();
// Reset type descriptor to defaults
void Reset()
{
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
m_regType[i] = TYP_UNKNOWN;
}
m_isEnclosingType = false;
#ifdef DEBUG
m_inited = false;
#endif
}
#ifdef DEBUG
// NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
// has been initialized.
unsigned TryGetReturnRegCount() const
{
return m_inited ? GetReturnRegCount() : 0;
}
#endif // DEBUG
//--------------------------------------------------------------------------------------------
// GetReturnRegCount: Get the count of return registers in which the return value is returned.
//
// Arguments:
// None
//
// Return Value:
// Count of return registers.
// Returns 0 if the return type is not returned in registers.
unsigned GetReturnRegCount() const
{
assert(m_inited);
int regCount = 0;
for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
{
if (m_regType[i] == TYP_UNKNOWN)
{
break;
}
// otherwise
regCount++;
}
#ifdef DEBUG
// Any remaining elements in m_regType[] should also be TYP_UNKNOWN
for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
{
assert(m_regType[i] == TYP_UNKNOWN);
}
#endif
return regCount;
}
//-----------------------------------------------------------------------
// IsMultiRegRetType: check whether the type is returned in multiple
// return registers.
//
// Arguments:
// None
//
// Return Value:
// Returns true if the type is returned in multiple return registers.
// False otherwise.
// Note that we only have to examine the first two values to determine this
//
bool IsMultiRegRetType() const
{
if (MAX_RET_REG_COUNT < 2)
{
return false;
}
else
{
assert(m_inited);
return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
}
}
//--------------------------------------------------------------------------
// GetReturnRegType: Get var_type of the return register specified by index.
//
// Arguments:
// index - Index of the return register.
// First return register will have an index 0 and so on.
//
// Return Value:
// var_type of the return register specified by its index.
// asserts if the index does not have a valid register return type.
var_types GetReturnRegType(unsigned index) const
{
var_types result = m_regType[index];
assert(result != TYP_UNKNOWN);
return result;
}
// True if this value is returned in integer register
// that is larger than the type itself.
bool IsEnclosingType() const
{
return m_isEnclosingType;
}
// Get i'th ABI return register
regNumber GetABIReturnReg(unsigned idx) const;
// Get reg mask of ABI return registers
regMaskTP GetABIReturnRegs() const;
};
class TailCallSiteInfo
{
bool m_isCallvirt : 1;
bool m_isCalli : 1;
CORINFO_SIG_INFO m_sig;
CORINFO_RESOLVED_TOKEN m_token;
public:
// Is the tailcall a callvirt instruction?
bool IsCallvirt()
{
return m_isCallvirt;
}
// Is the tailcall a calli instruction?
bool IsCalli()
{
return m_isCalli;
}
// Get the token of the callee
CORINFO_RESOLVED_TOKEN* GetToken()
{
assert(!IsCalli());
return &m_token;
}
// Get the signature of the callee
CORINFO_SIG_INFO* GetSig()
{
return &m_sig;
}
// Mark the tailcall as a calli with the given signature
void SetCalli(CORINFO_SIG_INFO* sig)
{
m_isCallvirt = false;
m_isCalli = true;
m_sig = *sig;
}
// Mark the tailcall as a callvirt with the given signature and token
void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = true;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
// Mark the tailcall as a call with the given signature and token
void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
{
m_isCallvirt = false;
m_isCalli = false;
m_sig = *sig;
m_token = *token;
}
};
class fgArgInfo;
enum class NonStandardArgKind : unsigned
{
None,
PInvokeFrame,
PInvokeTarget,
PInvokeCookie,
WrapperDelegateCell,
ShiftLow,
ShiftHigh,
FixedRetBuffer,
VirtualStubCell,
R2RIndirectionCell,
ValidateIndirectCallTarget,
// If changing this enum also change getNonStandardArgKindName and isNonStandardArgAddedLate in fgArgInfo
};
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind);
#endif
enum class CFGCallKind
{
ValidateAndCall,
Dispatch,
};
struct GenTreeCall final : public GenTree
{
class Use
{
GenTree* m_node;
Use* m_next;
public:
Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
{
assert(node != nullptr);
}
GenTree*& NodeRef()
{
return m_node;
}
GenTree* GetNode() const
{
assert(m_node != nullptr);
return m_node;
}
void SetNode(GenTree* node)
{
assert(node != nullptr);
m_node = node;
}
Use*& NextRef()
{
return m_next;
}
Use* GetNext() const
{
return m_next;
}
void SetNext(Use* next)
{
m_next = next;
}
};
class UseIterator
{
Use* m_use;
public:
UseIterator(Use* use) : m_use(use)
{
}
Use& operator*() const
{
return *m_use;
}
Use* operator->() const
{
return m_use;
}
Use* GetUse() const
{
return m_use;
}
UseIterator& operator++()
{
m_use = m_use->GetNext();
return *this;
}
bool operator==(const UseIterator& i) const
{
return m_use == i.m_use;
}
bool operator!=(const UseIterator& i) const
{
return m_use != i.m_use;
}
};
class UseList
{
Use* m_uses;
public:
UseList(Use* uses) : m_uses(uses)
{
}
UseIterator begin() const
{
return UseIterator(m_uses);
}
UseIterator end() const
{
return UseIterator(nullptr);
}
};
Use* gtCallThisArg; // The instance argument ('this' pointer)
Use* gtCallArgs; // The list of arguments in original evaluation order
Use* gtCallLateArgs; // On x86: The register arguments in an optimal order
// On ARM/x64: - also includes any outgoing arg space arguments
// - that were evaluated into a temp LclVar
fgArgInfo* fgArgInfo;
UseList Args()
{
return UseList(gtCallArgs);
}
UseList LateArgs()
{
return UseList(gtCallLateArgs);
}
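// Example (illustrative): the usual way to walk the early argument list is
//   for (GenTreeCall::Use& use : call->Args())
//   {
//       GenTree* argNode = use.GetNode();
//       // ... process argNode
//   }
// LateArgs() can be iterated the same way once the late argument list has been built.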
#ifdef DEBUG
// Used to register callsites with the EE
CORINFO_SIG_INFO* callSig;
#endif
union {
TailCallSiteInfo* tailCallInfo;
// Only used for unmanaged calls, which cannot be tail-called
CorInfoCallConvExtension unmgdCallConv;
};
#if FEATURE_MULTIREG_RET
// State required to support multi-reg returning call nodes.
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
ReturnTypeDesc gtReturnTypeDesc;
// GetRegNum() would always be the first return reg.
// The following array holds the other reg numbers of multi-reg return.
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
MultiRegSpillFlags gtSpillFlags;
#endif // FEATURE_MULTIREG_RET
//-----------------------------------------------------------------------
// GetReturnTypeDesc: get the type descriptor of return value of the call
//
// Arguments:
// None
//
// Returns
// Type descriptor of the value returned by call
//
// TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
const ReturnTypeDesc* GetReturnTypeDesc() const
{
#if FEATURE_MULTIREG_RET
return &gtReturnTypeDesc;
#else
return nullptr;
#endif
}
void InitializeLongReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeLongReturnType();
#endif
}
void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
#endif
}
void ResetReturnType()
{
#if FEATURE_MULTIREG_RET
gtReturnTypeDesc.Reset();
#endif
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th return register allocated to this call node.
//
// Arguments:
// idx - index of the return register
//
// Return Value:
// Return regNumber of i'th return register of call node.
// Returns REG_NA if there is no valid return register for the given index.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th return register of this call node
//
// Arguments:
// reg - reg number
// idx - index of the return register
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
unreached();
#endif
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given call node to this node
//
// Arguments:
// fromCall - GenTreeCall node from which to copy multi-reg state
//
// Return Value:
// None
//
void CopyOtherRegs(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
}
#endif
}
// Get reg mask of all the valid registers of gtOtherRegs array
regMaskTP GetOtherRegMask() const;
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
#if FEATURE_MULTIREG_RET
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
#else
assert(!"unreached");
return GTF_EMPTY;
#endif
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
void ClearOtherRegFlags()
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = 0;
#endif
}
//-------------------------------------------------------------------------
// CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
// the given call node.
//
// Arguments:
// fromCall - GenTreeCall node from which to copy
//
// Return Value:
// None
//
void CopyOtherRegFlags(GenTreeCall* fromCall)
{
#if FEATURE_MULTIREG_RET
this->gtSpillFlags = fromCall->gtSpillFlags;
#endif
}
bool IsUnmanaged() const
{
return (gtFlags & GTF_CALL_UNMANAGED) != 0;
}
bool NeedsNullCheck() const
{
return (gtFlags & GTF_CALL_NULLCHECK) != 0;
}
bool CallerPop() const
{
return (gtFlags & GTF_CALL_POP_ARGS) != 0;
}
bool IsVirtual() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
}
bool IsVirtualStub() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
}
bool IsVirtualVtable() const
{
return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
}
bool IsInlineCandidate() const
{
return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
}
bool IsR2ROrVirtualStubRelativeIndir()
{
#if defined(FEATURE_READYTORUN)
if (IsR2RRelativeIndir())
{
return true;
}
#endif
return IsVirtualStubRelativeIndir();
}
bool HasNonStandardAddedArgs(Compiler* compiler) const;
int GetNonStandardAddedArgCount(Compiler* compiler) const;
// Returns true if this call uses a retBuf argument (which depends on its calling convention)
bool HasRetBufArg() const
{
return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
}
//-------------------------------------------------------------------------
// TreatAsHasRetBufArg:
//
// Arguments:
// compiler, the compiler instance so that we can call eeGetHelperNum
//
// Return Value:
// Returns true if we treat the call as if it has a retBuf argument
// This method may actually have a retBuf argument
// or it could be a JIT helper that we are still transforming during
// the importer phase.
//
// Notes:
// On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
// will make HasRetBufArg() return true, but will also force the
// use of register x8 to pass the RetBuf argument.
//
bool TreatAsHasRetBufArg(Compiler* compiler) const;
bool HasFixedRetBufArg() const
{
if (!(hasFixedRetBuffReg() && HasRetBufArg()))
{
return false;
}
#if !defined(TARGET_ARM)
return !TargetOS::IsWindows || !callConvIsInstanceMethodCallConv(GetUnmanagedCallConv());
#else
return true;
#endif
}
//-----------------------------------------------------------------------------------------
// HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
//
// Arguments:
// None
//
// Return Value:
// True if the call is returning a multi-reg return value. False otherwise.
//
bool HasMultiRegRetVal() const
{
#ifdef FEATURE_MULTIREG_RET
#if defined(TARGET_LOONGARCH64)
return (gtType == TYP_STRUCT) && (gtReturnTypeDesc.GetReturnRegCount() > 1);
#else
#if defined(TARGET_X86) || defined(TARGET_ARM)
if (varTypeIsLong(gtType))
{
return true;
}
#endif
if (!varTypeIsStruct(gtType) || HasRetBufArg())
{
return false;
}
// Now it is a struct that is returned in registers.
return GetReturnTypeDesc()->IsMultiRegRetType();
#endif
#else // !FEATURE_MULTIREG_RET
return false;
#endif // !FEATURE_MULTIREG_RET
}
// Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
bool IsPInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
}
// Note that the distinction of whether a call is tail prefixed or an implicit tail call
// is maintained on the call node until fgMorphCall(), after which it will be
// either a tail call (i.e. IsTailCall() is true) or a non-tail call.
bool IsTailPrefixedCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
}
// Returns true if this call didn't have an explicit tail. prefix in the IL
// but was marked as an explicit tail call because of tail call stress mode.
bool IsStressTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
}
// This method returning "true" implies that tail call flowgraph morphing has
// performed final checks and committed to making a tail call.
bool IsTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
}
// This method returning "true" implies that importer has performed tail call checks
// and is providing a hint that this can be converted to a tail call.
bool CanTailCall() const
{
return IsTailPrefixedCall() || IsImplicitTailCall();
}
// Check whether this is a tailcall dispatched via JIT helper. We only use
// this mechanism on x86 as it is faster than our other more general
// tailcall mechanism.
bool IsTailCallViaJitHelper() const
{
#ifdef TARGET_X86
return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return false;
#endif
}
#if FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
#ifdef TARGET_X86
return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
#else
return IsTailCall();
#endif
}
#else // !FEATURE_FASTTAILCALL
bool IsFastTailCall() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
#if FEATURE_TAILCALL_OPT
// Returns true if this is marked for opportunistic tail calling.
// That is, can be tail called though not explicitly prefixed with "tail" prefix.
bool IsImplicitTailCall() const
{
return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
}
bool IsTailCallConvertibleToLoop() const
{
return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
}
#else // !FEATURE_TAILCALL_OPT
bool IsImplicitTailCall() const
{
return false;
}
bool IsTailCallConvertibleToLoop() const
{
return false;
}
#endif // !FEATURE_TAILCALL_OPT
bool NormalizesSmallTypesOnReturn()
{
return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
}
bool IsSameThis() const
{
return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
}
bool IsDelegateInvoke() const
{
return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
}
bool IsVirtualStubRelativeIndir() const
{
return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
}
bool IsR2RRelativeIndir() const
{
#ifdef FEATURE_READYTORUN
return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
#else
return false;
#endif
}
#ifdef FEATURE_READYTORUN
void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
{
gtEntryPoint = entryPoint;
if (gtEntryPoint.accessType == IAT_PVALUE)
{
gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
}
}
#endif // FEATURE_READYTORUN
bool IsVarargs() const
{
return (gtCallMoreFlags & GTF_CALL_M_VARARGS) != 0;
}
bool IsNoReturn() const
{
return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
}
bool IsFatPointerCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
}
bool IsGuardedDevirtualizationCandidate() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
}
bool IsPure(Compiler* compiler) const;
bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
void ClearFatPointerCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
}
void SetFatPointerCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
}
bool IsDevirtualized() const
{
return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
}
bool IsGuarded() const
{
return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
}
bool IsUnboxed() const
{
return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
}
bool IsSuppressGCTransition() const
{
return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
}
void ClearGuardedDevirtualizationCandidate()
{
gtCallMoreFlags &= ~GTF_CALL_M_GUARDED_DEVIRT;
}
void SetGuardedDevirtualizationCandidate()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED_DEVIRT;
}
void SetIsGuarded()
{
gtCallMoreFlags |= GTF_CALL_M_GUARDED;
}
void SetExpRuntimeLookup()
{
gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
void ClearExpRuntimeLookup()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
}
bool IsExpRuntimeLookup() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
}
void SetExpandedEarly()
{
gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
}
void ClearExpandedEarly()
{
gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
}
bool IsExpandedEarly() const
{
return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
}
//-----------------------------------------------------------------------------------------
// GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
//
// Arguments:
// None
//
// Return Value:
// The kind (either R2RIndirectionCell or VirtualStubCell),
// or NonStandardArgKind::None if this call does not have an indirection cell.
//
NonStandardArgKind GetIndirectionCellArgKind() const
{
if (IsVirtualStub())
{
return NonStandardArgKind::VirtualStubCell;
}
#if defined(TARGET_ARMARCH)
// For ARM architectures, we always use an indirection cell for R2R calls.
if (IsR2RRelativeIndir())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#elif defined(TARGET_XARCH)
// On XARCH the indirection cell is recovered by disassembling the call site, except for tail calls, which need an indirection cell.
if (IsR2RRelativeIndir() && IsFastTailCall())
{
return NonStandardArgKind::R2RIndirectionCell;
}
#endif
return NonStandardArgKind::None;
}
CFGCallKind GetCFGCallKind()
{
#if defined(TARGET_AMD64)
// On x64 the dispatcher is more performant, but we cannot use it when
// we need to pass indirection cells as those go into registers that
// are clobbered by the dispatch helper.
bool mayUseDispatcher = GetIndirectionCellArgKind() == NonStandardArgKind::None;
bool shouldUseDispatcher = true;
#elif defined(TARGET_ARM64)
bool mayUseDispatcher = true;
// Branch predictors on ARM64 generally do not handle the dispatcher as
// well as on x64 hardware, so only use the validator by default.
bool shouldUseDispatcher = false;
#else
// Other platforms do not even support the dispatcher.
bool mayUseDispatcher = false;
bool shouldUseDispatcher = false;
#endif
#ifdef DEBUG
switch (JitConfig.JitCFGUseDispatcher())
{
case 0:
shouldUseDispatcher = false;
break;
case 1:
shouldUseDispatcher = true;
break;
default:
break;
}
#endif
return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
}
void ResetArgInfo();
GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
var_types gtReturnType : 5; // exact return type
CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
union {
// only used for CALLI unmanaged calls (CT_INDIRECT)
GenTree* gtCallCookie;
// gtInlineCandidateInfo is only used when inlining methods
InlineCandidateInfo* gtInlineCandidateInfo;
GuardedDevirtualizationCandidateInfo* gtGuardedDevirtualizationCandidateInfo;
ClassProfileCandidateInfo* gtClassProfileCandidateInfo;
LateDevirtualizationInfo* gtLateDevirtualizationInfo;
CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
};
// expression evaluated after args are placed which determines the control target
GenTree* gtControlExpr;
union {
CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
GenTree* gtCallAddr; // CT_INDIRECT
};
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
#if defined(DEBUG) || defined(INLINE_DATA)
// For non-inline candidates, track the first observation
// that blocks candidacy.
InlineObservation gtInlineObservation;
// IL offset of the call wrt its parent method.
IL_OFFSET gtRawILOffset;
// In DEBUG we report even non inline candidates in the inline tree in
// fgNoteNonInlineCandidate. We need to keep around the inline context for
// this as normally it's part of the candidate info.
class InlineContext* gtInlineContext;
#endif // defined(DEBUG) || defined(INLINE_DATA)
bool IsHelperCall() const
{
return gtCallType == CT_HELPER;
}
bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
{
return IsHelperCall() && (callMethHnd == gtCallMethHnd);
}
bool IsHelperCall(Compiler* compiler, unsigned helper) const;
void ReplaceCallOperand(GenTree** operandUseEdge, GenTree* replacement);
bool AreArgsComplete() const;
CorInfoCallConvExtension GetUnmanagedCallConv() const
{
return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
}
static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
GenTreeCall(var_types type) : GenTree(GT_CALL, type)
{
fgArgInfo = nullptr;
gtRetBufArg = nullptr;
}
#if DEBUGGABLE_GENTREE
GenTreeCall() : GenTree()
{
}
#endif
GenTree* GetLclRetBufArgNode() const
{
if (gtRetBufArg == nullptr)
{
return nullptr;
}
assert(HasRetBufArg());
GenTree* lclRetBufArgNode = gtRetBufArg->GetNode();
switch (lclRetBufArgNode->OperGet())
{
// Get the true value from setup args
case GT_ASG:
return lclRetBufArgNode->AsOp()->gtGetOp2();
case GT_STORE_LCL_VAR:
return lclRetBufArgNode->AsUnOp()->gtGetOp1();
// Get the value from putarg wrapper nodes
case GT_PUTARG_REG:
case GT_PUTARG_STK:
return lclRetBufArgNode->AsOp()->gtGetOp1();
// Otherwise the node should be in the Use*
default:
return lclRetBufArgNode;
}
}
void SetLclRetBufArg(Use* retBufArg);
Use* gtRetBufArg; // The argument that holds the return buffer argument
};
struct GenTreeCmpXchg : public GenTree
{
GenTree* gtOpLocation;
GenTree* gtOpValue;
GenTree* gtOpComparand;
GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
: GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
{
// There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
// have global effects.
gtFlags |= (GTF_GLOB_REF | GTF_ASG);
// Merge in flags from operands
gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
}
#if DEBUGGABLE_GENTREE
GenTreeCmpXchg() : GenTree()
{
}
#endif
};
#if !defined(TARGET_64BIT)
struct GenTreeMultiRegOp : public GenTreeOp
{
regNumber gtOtherReg;
// GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
// more of its result regs are in that state. The spill flags of each of the
// return registers are stored here. We only need 2 bits per returned register,
// so this is treated as a 2-bit array. No architecture needs more than 8 bits.
MultiRegSpillFlags gtSpillFlags;
GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
: GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
{
ClearOtherRegFlags();
}
unsigned GetRegCount() const
{
return (TypeGet() == TYP_LONG) ? 2 : 1;
}
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the register
//
// Return Value:
// Return regNumber of i'th register of this register argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < 2);
if (idx == 0)
{
return GetRegNum();
}
return gtOtherReg;
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
//
var_types GetRegType(unsigned index) const
{
assert(index < 2);
// The type of register is usually the same as GenTree type, since GenTreeMultiRegOp usually defines a single
// reg.
// The special case is when we have TYP_LONG, which may be a MUL_LONG, or a DOUBLE arg passed as LONG,
// in which case we need to separate them into int for each index.
var_types result = TypeGet();
if (result == TYP_LONG)
{
result = TYP_INT;
}
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreeMultiRegOp() : GenTreeOp()
{
}
#endif
};
#endif // !defined(TARGET_64BIT)
struct GenTreeFptrVal : public GenTree
{
CORINFO_METHOD_HANDLE gtFptrMethod;
bool gtFptrDelegateTarget;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
: GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
gtEntryPoint.accessType = IAT_VALUE;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeFptrVal() : GenTree()
{
}
#endif
};
/* gtQmark */
struct GenTreeQmark : public GenTreeOp
{
GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
{
// These must follow a specific form.
assert((cond != nullptr) && cond->TypeIs(TYP_INT));
assert((colon != nullptr) && colon->OperIs(GT_COLON));
}
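// Example (illustrative): "cond ? thenVal : elseVal" is represented as
//   QMARK(cond, COLON(elseVal, thenVal))
// where, by JIT convention, GT_COLON's first operand is the "else" arm and its
// second operand is the "then" arm.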
#if DEBUGGABLE_GENTREE
GenTreeQmark() : GenTreeOp()
{
}
#endif
};
/* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
struct GenTreeIntrinsic : public GenTreeOp
{
NamedIntrinsic gtIntrinsicName;
CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
#ifdef FEATURE_READYTORUN
// Call target lookup info for method call from a Ready To Run module
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
GenTreeIntrinsic(
var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
: GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
{
assert(intrinsicName != NI_Illegal);
}
#if DEBUGGABLE_GENTREE
GenTreeIntrinsic() : GenTreeOp()
{
}
#endif
};
// GenTreeMultiOp - a node with a flexible count of operands stored in an array.
// The array can be an inline one, or a dynamic one, or both, with switching
// between them supported. See GenTreeJitIntrinsic for an example of a node
// utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
// with two operands.
//
struct GenTreeMultiOp : public GenTree
{
public:
class Iterator
{
protected:
GenTree** m_use;
Iterator(GenTree** use) : m_use(use)
{
}
public:
Iterator& operator++()
{
m_use++;
return *this;
}
bool operator==(const Iterator& other) const
{
return m_use == other.m_use;
}
bool operator!=(const Iterator& other) const
{
return m_use != other.m_use;
}
};
class OperandsIterator final : public Iterator
{
public:
OperandsIterator(GenTree** use) : Iterator(use)
{
}
GenTree* operator*()
{
return *m_use;
}
};
class UseEdgesIterator final : public Iterator
{
public:
UseEdgesIterator(GenTree** use) : Iterator(use)
{
}
GenTree** operator*()
{
return m_use;
}
};
private:
GenTree** m_operands;
protected:
template <unsigned InlineOperandCount, typename... Operands>
GenTreeMultiOp(genTreeOps oper,
var_types type,
CompAllocator allocator,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
Operands... operands)
: GenTree(oper, type DEBUGARG(largeNode))
{
const size_t OperandCount = sizeof...(Operands);
m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
// "OperandCount + 1" so that it works well when OperandCount is 0.
GenTree* operandsArray[OperandCount + 1]{operands...};
InitializeOperands(operandsArray, OperandCount);
}
// Note that this constructor takes ownership of the "operands" array.
template <unsigned InlineOperandCount>
GenTreeMultiOp(genTreeOps oper,
var_types type,
GenTree** operands,
size_t operandCount,
GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
: GenTree(oper, type DEBUGARG(largeNode))
{
m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
InitializeOperands(operands, operandCount);
}
public:
#if DEBUGGABLE_GENTREE
GenTreeMultiOp() : GenTree()
{
}
#endif
GenTree*& Op(size_t index)
{
size_t actualIndex = index - 1;
assert(actualIndex < m_operandCount);
assert(m_operands[actualIndex] != nullptr);
return m_operands[actualIndex];
}
GenTree* Op(size_t index) const
{
return const_cast<GenTreeMultiOp*>(this)->Op(index);
}
// Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<OperandsIterator> Operands()
{
return MakeIteratorPair(OperandsIterator(GetOperandArray()),
OperandsIterator(GetOperandArray() + GetOperandCount()));
}
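// Example (illustrative): visiting every operand of a multi-op node in operand order
// (as noted above, this does not honor GTF_REVERSE_OPS):
//   for (GenTree* operand : multiOp->Operands())
//   {
//       // ... process operand
//   }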
// Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
IteratorPair<UseEdgesIterator> UseEdges()
{
return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
UseEdgesIterator(GetOperandArray() + GetOperandCount()));
}
size_t GetOperandCount() const
{
return m_operandCount;
}
GenTree** GetOperandArray(size_t startIndex = 0) const
{
return m_operands + startIndex;
}
protected:
// Reconfigures the operand array, leaving it in a "dirty" state.
void ResetOperandArray(size_t newOperandCount,
Compiler* compiler,
GenTree** inlineOperands,
size_t inlineOperandCount);
static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
private:
void InitializeOperands(GenTree** operands, size_t operandCount);
void SetOperandCount(size_t newOperandCount)
{
assert(FitsIn<uint8_t>(newOperandCount));
m_operandCount = static_cast<uint8_t>(newOperandCount);
}
};
// Helper class used to implement the constructor of GenTreeJitIntrinsic which
// transfers the ownership of the passed-in array to the underlying MultiOp node.
class IntrinsicNodeBuilder final
{
friend struct GenTreeJitIntrinsic;
GenTree** m_operands;
size_t m_operandCount;
GenTree* m_inlineOperands[2];
public:
IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
{
m_operands =
(operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
#ifdef DEBUG
for (size_t i = 0; i < operandCount; i++)
{
m_operands[i] = nullptr;
}
#endif // DEBUG
}
IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
{
m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
: allocator.allocate<GenTree*>(m_operandCount);
for (size_t i = 0; i < m_operandCount; i++)
{
m_operands[i] = source->Op(i + 1);
}
}
void AddOperand(size_t index, GenTree* operand)
{
assert(index < m_operandCount);
assert(m_operands[index] == nullptr);
m_operands[index] = operand;
}
GenTree* GetOperand(size_t index) const
{
assert(index < m_operandCount);
assert(m_operands[index] != nullptr);
return m_operands[index];
}
size_t GetOperandCount() const
{
return m_operandCount;
}
private:
GenTree** GetBuiltOperands()
{
#ifdef DEBUG
for (size_t i = 0; i < m_operandCount; i++)
{
assert(m_operands[i] != nullptr);
}
#endif // DEBUG
return m_operands;
}
};
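// Example (illustrative; "compiler", "operandCount", "ops" and the intrinsic arguments are hypothetical):
//   IntrinsicNodeBuilder nodeBuilder(compiler->getAllocator(CMK_ASTNode), operandCount);
//   for (size_t i = 0; i < operandCount; i++)
//   {
//       nodeBuilder.AddOperand(i, ops[i]);
//   }
//   // The builder is then moved into the node, which takes ownership of the operand array.
//   GenTreeHWIntrinsic* node = new (compiler, GT_HWINTRINSIC)
//       GenTreeHWIntrinsic(type, std::move(nodeBuilder), intrinsicId, simdBaseJitType, simdSize, isSimdAsHWIntrinsic);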
struct GenTreeJitIntrinsic : public GenTreeMultiOp
{
protected:
GenTree* gtInlineOperands[2];
uint16_t gtLayoutNum;
unsigned char gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
regNumberSmall gtOtherReg; // For intrinsics that return 2 registers
unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
#if defined(FEATURE_SIMD)
union {
SIMDIntrinsicID gtSIMDIntrinsicID; // operation Id
NamedIntrinsic gtHWIntrinsicId;
};
#else
NamedIntrinsic gtHWIntrinsicId;
#endif
public:
unsigned GetLayoutNum() const
{
return gtLayoutNum;
}
void SetLayoutNum(unsigned layoutNum)
{
assert(FitsIn<uint16_t>(layoutNum));
gtLayoutNum = static_cast<uint16_t>(layoutNum);
}
regNumber GetOtherReg() const
{
return (regNumber)gtOtherReg;
}
void SetOtherReg(regNumber reg)
{
gtOtherReg = (regNumberSmall)reg;
assert(gtOtherReg == reg);
}
CorInfoType GetAuxiliaryJitType() const
{
return (CorInfoType)gtAuxiliaryJitType;
}
void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
{
gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
assert(gtAuxiliaryJitType == auxiliaryJitType);
}
var_types GetAuxiliaryType() const;
CorInfoType GetSimdBaseJitType() const
{
return (CorInfoType)gtSimdBaseJitType;
}
CorInfoType GetNormalizedSimdBaseJitType() const
{
CorInfoType simdBaseJitType = GetSimdBaseJitType();
switch (simdBaseJitType)
{
case CORINFO_TYPE_NATIVEINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_LONG;
#else
return CORINFO_TYPE_INT;
#endif
}
case CORINFO_TYPE_NATIVEUINT:
{
#ifdef TARGET_64BIT
return CORINFO_TYPE_ULONG;
#else
return CORINFO_TYPE_UINT;
#endif
}
default:
return simdBaseJitType;
}
}
void SetSimdBaseJitType(CorInfoType simdBaseJitType)
{
gtSimdBaseJitType = (unsigned char)simdBaseJitType;
assert(gtSimdBaseJitType == simdBaseJitType);
}
var_types GetSimdBaseType() const;
unsigned char GetSimdSize() const
{
return gtSimdSize;
}
void SetSimdSize(unsigned simdSize)
{
gtSimdSize = (unsigned char)simdSize;
assert(gtSimdSize == simdSize);
}
template <typename... Operands>
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
CompAllocator allocator,
CorInfoType simdBaseJitType,
unsigned simdSize,
Operands... operands)
: GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
#if DEBUGGABLE_GENTREE
GenTreeJitIntrinsic() : GenTreeMultiOp()
{
}
#endif
protected:
GenTreeJitIntrinsic(genTreeOps oper,
var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeMultiOp(oper,
type,
nodeBuilder.GetBuiltOperands(),
nodeBuilder.GetOperandCount(),
gtInlineOperands DEBUGARG(false))
, gtLayoutNum(0)
, gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
, gtOtherReg(REG_NA)
, gtSimdBaseJitType((unsigned char)simdBaseJitType)
, gtSimdSize((unsigned char)simdSize)
, gtHWIntrinsicId(NI_Illegal)
{
assert(gtSimdBaseJitType == simdBaseJitType);
assert(gtSimdSize == simdSize);
}
public:
bool isSIMD() const
{
return gtSimdSize != 0;
}
};
#ifdef FEATURE_SIMD
/* gtSIMD -- SIMD intrinsic (possibly-binary op [NULL op2 is allowed] with additional fields) */
struct GenTreeSIMD : public GenTreeJitIntrinsic
{
GenTreeSIMD(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
GenTreeSIMD(var_types type,
CompAllocator allocator,
GenTree* op1,
GenTree* op2,
SIMDIntrinsicID simdIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize)
: GenTreeJitIntrinsic(GT_SIMD, type, allocator, simdBaseJitType, simdSize, op1, op2)
{
gtSIMDIntrinsicID = simdIntrinsicID;
}
#if DEBUGGABLE_GENTREE
GenTreeSIMD() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the SIMD Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
SIMDIntrinsicID GetSIMDIntrinsicId() const
{
return gtSIMDIntrinsicID;
}
static bool Equals(GenTreeSIMD* op1, GenTreeSIMD* op2);
};
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
{
GenTreeHWIntrinsic(var_types type,
IntrinsicNodeBuilder&& nodeBuilder,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
{
Initialize(hwIntrinsicID, isSimdAsHWIntrinsic);
}
template <typename... Operands>
GenTreeHWIntrinsic(var_types type,
CompAllocator allocator,
NamedIntrinsic hwIntrinsicID,
CorInfoType simdBaseJitType,
unsigned simdSize,
bool isSimdAsHWIntrinsic,
Operands... operands)
: GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
{
Initialize(hwIntrinsicID, isSimdAsHWIntrinsic);
}
#if DEBUGGABLE_GENTREE
GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
{
}
#endif
bool OperIsMemoryLoad() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad semantics,
// false otherwise
bool OperIsMemoryStore() const; // Returns true for the HW Intrinsic instructions that have MemoryStore semantics,
// false otherwise
bool OperIsMemoryLoadOrStore() const; // Returns true for the HW Intrinsic instructions that have MemoryLoad or
// MemoryStore semantics, false otherwise
bool IsSimdAsHWIntrinsic() const
{
return (gtFlags & GTF_SIMDASHW_OP) != 0;
}
unsigned GetResultOpNumForFMA(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
NamedIntrinsic GetHWIntrinsicId() const;
//---------------------------------------------------------------------------------------
// ChangeHWIntrinsicId: Change the intrinsic id for this node.
//
// This method just sets the intrinsic id, asserting that the new intrinsic
// has the same number of operands as the old one, optionally setting some of
// the new operands. Intrinsics with an unknown number of operands are exempt
// from the "do I have the same number of operands" check however, so this method must
// be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
// the node for a different intrinsic, with a possibly different number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// operands - optional operands to set while changing the id
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
{
const size_t OperandCount = sizeof...(Operands);
assert(OperandCount <= GetOperandCount());
SetHWIntrinsicId(intrinsicId);
GenTree* operandsArray[OperandCount + 1]{operands...};
GenTree** operandsStore = GetOperandArray();
for (size_t i = 0; i < OperandCount; i++)
{
operandsStore[i] = operandsArray[i];
}
}
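// Example (illustrative; the intrinsic id shown is arbitrary): re-target a node that
// already has two operands to another two-operand intrinsic, reusing its operand array:
//   node->ChangeHWIntrinsicId(NI_AdvSimd_Add, op1, op2);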
//---------------------------------------------------------------------------------------
// ResetHWIntrinsicId: Reset the intrinsic id for this node.
//
// This method resets the intrinsic id, fully reconfiguring the node. It must
// be supplied with all the operands the new node needs, and can allocate a
// new dynamic array if the operands do not fit into an inline one, in which
// case a compiler argument is used to get the memory allocator.
//
// This method is similar to "ChangeHWIntrinsicId" but is more versatile and
// thus more expensive. Use it when you need to bash to an intrinsic id with
// a different number of operands than what the original node had, or, which
// is equivalent, when you do not know the original number of operands.
//
// Arguments:
// intrinsicId - the new intrinsic id for the node
// compiler - compiler to allocate memory with, can be "nullptr" if the
// number of new operands does not exceed the length of the
// inline array (so, there are 2 or fewer of them)
// operands - *all* operands for the new node
//
// Notes:
// It is the caller's responsibility to update side effect flags.
//
template <typename... Operands>
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
{
const size_t NewOperandCount = sizeof...(Operands);
assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
ChangeHWIntrinsicId(intrinsicId, operands...);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
}
void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
{
ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
}
static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
private:
void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
void Initialize(NamedIntrinsic intrinsicId, bool isSimdAsHWIntrinsic)
{
SetHWIntrinsicId(intrinsicId);
bool isStore = OperIsMemoryStore();
bool isLoad = OperIsMemoryLoad();
if (isStore || isLoad)
{
gtFlags |= (GTF_GLOB_REF | GTF_EXCEPT);
if (isStore)
{
gtFlags |= GTF_ASG;
}
}
if (isSimdAsHWIntrinsic)
{
gtFlags |= GTF_SIMDASHW_OP;
}
}
};
#endif // FEATURE_HW_INTRINSICS
/* gtIndex -- array access */
struct GenTreeIndex : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
unsigned gtIndElemSize; // size of elements in the array
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
GenTreeIndex(var_types type, GenTree* arr, GenTree* ind, unsigned indElemSize)
: GenTreeOp(GT_INDEX, type, arr, ind)
, gtIndElemSize(indElemSize)
, gtStructElemClass(nullptr) // We always initialize this after construction.
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndex() : GenTreeOp()
{
}
#endif
};
// gtIndexAddr: given an array object and an index, checks that the index is within the bounds of the array if
// necessary and produces the address of the value at that index of the array.
struct GenTreeIndexAddr : public GenTreeOp
{
GenTree*& Arr()
{
return gtOp1;
}
GenTree*& Index()
{
return gtOp2;
}
CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
var_types gtElemType; // The element type of the array.
unsigned gtElemSize; // size of elements in the array
unsigned gtLenOffset; // The offset from the array's base address to its length.
unsigned gtElemOffset; // The offset from the array's base address to its first element.
GenTreeIndexAddr(GenTree* arr,
GenTree* ind,
var_types elemType,
CORINFO_CLASS_HANDLE structElemClass,
unsigned elemSize,
unsigned lenOffset,
unsigned elemOffset)
: GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
, gtStructElemClass(structElemClass)
, gtIndRngFailBB(nullptr)
, gtElemType(elemType)
, gtElemSize(elemSize)
, gtLenOffset(lenOffset)
, gtElemOffset(elemOffset)
{
#ifdef DEBUG
if (JitConfig.JitSkipArrayBoundCheck() == 1)
{
// Skip bounds check
}
else
#endif
{
// Do bounds check
gtFlags |= GTF_INX_RNGCHK;
}
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeIndexAddr() : GenTreeOp()
{
}
#endif
};
// GenTreeArrAddr - GT_ARR_ADDR, carries information about the array type from morph to VN.
// This node is just a wrapper (similar to GenTreeBox), the real address
// expression is contained in its first operand.
//
struct GenTreeArrAddr : GenTreeUnOp
{
private:
CORINFO_CLASS_HANDLE m_elemClassHandle; // The array element class. Currently only used for arrays of TYP_STRUCT.
var_types m_elemType; // The normalized (TYP_SIMD != TYP_STRUCT) array element type.
uint8_t m_firstElemOffset; // Offset to the first element of the array.
public:
GenTreeArrAddr(GenTree* addr, var_types elemType, CORINFO_CLASS_HANDLE elemClassHandle, uint8_t firstElemOffset)
: GenTreeUnOp(GT_ARR_ADDR, TYP_BYREF, addr DEBUGARG(/* largeNode */ false))
, m_elemClassHandle(elemClassHandle)
, m_elemType(elemType)
, m_firstElemOffset(firstElemOffset)
{
// Temporarily disable this assert. Tracking: https://github.com/dotnet/runtime/issues/67600
// assert(addr->TypeIs(TYP_BYREF) || addr->IsIntegralConst(0));
assert(((elemType == TYP_STRUCT) && (elemClassHandle != NO_CLASS_HANDLE)) ||
(elemClassHandle == NO_CLASS_HANDLE));
// We will only consider "addr" for CSE. This is more profitable and precise
// because ARR_ADDR can get its VN "polluted" by zero-offset field sequences.
SetDoNotCSE();
}
#if DEBUGGABLE_GENTREE
GenTreeArrAddr() : GenTreeUnOp()
{
}
#endif
GenTree*& Addr()
{
return gtOp1;
}
CORINFO_CLASS_HANDLE GetElemClassHandle() const
{
return m_elemClassHandle;
}
var_types GetElemType() const
{
return m_elemType;
}
uint8_t GetFirstElemOffset() const
{
return m_firstElemOffset;
}
void ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum* pInxVN);
private:
static void ParseArrayAddressWork(GenTree* tree,
Compiler* comp,
target_ssize_t inputMul,
GenTree** pArr,
ValueNum* pInxVN,
target_ssize_t* pOffset);
};
/* gtArrLen -- array length (GT_ARR_LENGTH)
GT_ARR_LENGTH is used for "arr.length" */
struct GenTreeArrLen : public GenTreeUnOp
{
GenTree*& ArrRef()
{
return gtOp1;
} // the array address node
private:
int gtArrLenOffset; // constant to add to "gtArrRef" to get the address of the array length.
public:
inline int ArrLenOffset()
{
return gtArrLenOffset;
}
GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
: GenTreeUnOp(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArrLen() : GenTreeUnOp()
{
}
#endif
};
// This takes:
// - a length value
// - an index value, and
// - the label to jump to if the index is out of range.
// - the "kind" of the throw block to branch to on failure
// It generates no result.
//
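// Example (illustrative): the range check emitted for "a[i]" typically has the shape
//   BOUNDS_CHECK(i, ARR_LENGTH(a))
// and branches to a SCK_RNGCHK_FAIL throw block when the (unsigned) index is not
// less than the length.
//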
struct GenTreeBoundsChk : public GenTreeOp
{
BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
// Store some information about the array element type that was in the GT_INDEX node before morphing.
// Note that this information is also stored in the m_arrayInfoMap of the morphed IND node (that
// is marked with GTF_IND_ARR_INDEX), but that can be hard to find.
var_types gtInxType; // Save the GT_INDEX type
GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
: GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length)
, gtIndRngFailBB(nullptr)
, gtThrowKind(kind)
, gtInxType(TYP_UNKNOWN)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeBoundsChk() : GenTreeOp()
{
}
#endif
// If this check is against GT_ARR_LENGTH, returns array reference, else "NULL".
GenTree* GetArray() const
{
return GetArrayLength()->OperIs(GT_ARR_LENGTH) ? GetArrayLength()->AsArrLen()->ArrRef() : nullptr;
}
// The index expression.
GenTree* GetIndex() const
{
return gtOp1;
}
// An expression for the length.
GenTree* GetArrayLength() const
{
return gtOp2;
}
};
// GenTreeArrElem - bounds checked address (byref) of a general array element,
// for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
//
struct GenTreeArrElem : public GenTree
{
GenTree* gtArrObj;
#define GT_ARR_MAX_RANK 3
GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
unsigned char gtArrRank; // Rank of the array
unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
// on the optimization path of array intrinsics.
// It stores the size of array elements WHEN it can fit
// into an "unsigned char".
// This has caused VSW 571394.
var_types gtArrElemType; // The array element type
// Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
GenTreeArrElem(
var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, var_types elemType, GenTree** inds)
: GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize), gtArrElemType(elemType)
{
gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
for (unsigned char i = 0; i < rank; i++)
{
gtArrInds[i] = inds[i];
gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
}
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrElem() : GenTree()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrIndex (gtArrIndex): Expression to bounds-check the index for one dimension of a
// multi-dimensional or non-zero-based array, and to compute the effective index
// (i.e. subtracting the lower bound).
//
// Notes:
// This node is similar in some ways to GenTreeBoundsChk, which ONLY performs the check.
// The reason that this node incorporates the check into the effective index computation is
// to avoid duplicating the codegen, as the effective index is required to compute the
// offset anyway.
// TODO-CQ: Enable optimization of the lower bound and length by replacing this:
// /--* <arrObj>
// +--* <index0>
// +--* ArrIndex[i, ]
// with something like:
// /--* <arrObj>
// /--* ArrLowerBound[i, ]
// | /--* <arrObj>
// +--* ArrLen[i, ] (either generalize GT_ARR_LENGTH or add a new node)
// +--* <index0>
// +--* ArrIndex[i, ]
// Which could, for example, be optimized to the following when known to be within bounds:
// /--* TempForLowerBoundDim0
// +--* <index0>
// +--* - (GT_SUB)
//
struct GenTreeArrIndex : public GenTreeOp
{
// The array object - may be any expression producing an Array reference, but is likely to be a lclVar.
GenTree*& ArrObj()
{
return gtOp1;
}
// The index expression - may be any integral expression.
GenTree*& IndexExpr()
{
return gtOp2;
}
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrIndex(var_types type,
GenTree* arrObj,
GenTree* indexExpr,
unsigned char currDim,
unsigned char arrRank,
var_types elemType)
: GenTreeOp(GT_ARR_INDEX, type, arrObj, indexExpr)
, gtCurrDim(currDim)
, gtArrRank(arrRank)
, gtArrElemType(elemType)
{
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeArrIndex() : GenTreeOp()
{
}
#endif
};
//--------------------------------------------
//
// GenTreeArrOffset (gtArrOffset): Expression to compute the accumulated offset for the address
// of an element of a multi-dimensional or non-zero-based array.
//
// Notes:
// The result of this expression is (gtOffset * dimSize) + gtIndex
// where dimSize is the length/stride/size of the dimension, and is obtained from gtArrObj.
// This node is generated in conjunction with the GenTreeArrIndex node, which computes the
// effective index for a single dimension. The sub-trees can be separately optimized, e.g.
// within a loop body where the expression for the 0th dimension may be invariant.
//
// Here is an example of how the tree might look for a two-dimension array reference:
// /--* const 0
// | /--* <arrObj>
// | +--* <index0>
// +--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// TODO-CQ: see comment on GenTreeArrIndex for how its representation may change. When that
// is done, we will also want to replace the <arrObj> argument to arrOffs with the
// ArrLen as for GenTreeArrIndex.
//
struct GenTreeArrOffs : public GenTree
{
GenTree* gtOffset; // The accumulated offset for lower dimensions - must be TYP_I_IMPL, and
// will either be a CSE temp, the constant 0, or another GenTreeArrOffs node.
GenTree* gtIndex; // The effective index for the current dimension - must be non-negative
// and can be any expression (though it is likely to be either a GenTreeArrIndex,
// node, a lclVar, or a constant).
GenTree* gtArrObj; // The array object - may be any expression producing an Array reference,
// but is likely to be a lclVar.
unsigned char gtCurrDim; // The current dimension
unsigned char gtArrRank; // Rank of the array
var_types gtArrElemType; // The array element type
GenTreeArrOffs(var_types type,
GenTree* offset,
GenTree* index,
GenTree* arrObj,
unsigned char currDim,
unsigned char rank,
var_types elemType)
: GenTree(GT_ARR_OFFSET, type)
, gtOffset(offset)
, gtIndex(index)
, gtArrObj(arrObj)
, gtCurrDim(currDim)
, gtArrRank(rank)
, gtArrElemType(elemType)
{
assert(index->gtFlags & GTF_EXCEPT);
gtFlags |= GTF_EXCEPT;
}
#if DEBUGGABLE_GENTREE
GenTreeArrOffs() : GenTree()
{
}
#endif
};
/* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
struct GenTreeAddrMode : public GenTreeOp
{
// Address is Base + Index*Scale + Offset.
// These are the legal patterns:
//
// Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
// Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
// Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
// Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
// Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
// Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
// Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
//
// So, for example:
// 1. Base + Index is legal with Scale==1
// 2. If Index is null, Scale should be zero (or uninitialized / unused)
// 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
// "Index*Scale + Offset".
// First operand is base address/pointer
bool HasBase() const
{
return gtOp1 != nullptr;
}
GenTree*& Base()
{
return gtOp1;
}
void SetBase(GenTree* base)
{
gtOp1 = base;
}
// Second operand is scaled index value
bool HasIndex() const
{
return gtOp2 != nullptr;
}
GenTree*& Index()
{
return gtOp2;
}
void SetIndex(GenTree* index)
{
gtOp2 = index;
}
unsigned GetScale() const
{
return gtScale;
}
void SetScale(unsigned scale)
{
gtScale = scale;
}
int Offset()
{
return static_cast<int>(gtOffset);
}
void SetOffset(int offset)
{
gtOffset = offset;
}
unsigned gtScale; // The scale factor
private:
ssize_t gtOffset; // The offset to add
public:
GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
: GenTreeOp(GT_LEA, type, base, index)
{
assert(base != nullptr || index != nullptr);
gtScale = scale;
gtOffset = offset;
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeAddrMode() : GenTreeOp()
{
}
#endif
};
// Indir is just an op, no additional data, but some additional abstractions
struct GenTreeIndir : public GenTreeOp
{
// The address for the indirection.
GenTree*& Addr()
{
return gtOp1;
}
void SetAddr(GenTree* addr)
{
assert(addr != nullptr);
assert(addr->TypeIs(TYP_I_IMPL, TYP_BYREF));
gtOp1 = addr;
}
// These methods provide an interface to the indirection's address expression (its base, index, scale and offset).
bool HasBase();
bool HasIndex();
GenTree* Base();
GenTree* Index();
unsigned Scale();
ssize_t Offset();
GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
{
}
// True if this indirection is a volatile memory operation.
bool IsVolatile() const
{
return (gtFlags & GTF_IND_VOLATILE) != 0;
}
// True if this indirection is an unaligned memory operation.
bool IsUnaligned() const
{
return (gtFlags & GTF_IND_UNALIGNED) != 0;
}
#if DEBUGGABLE_GENTREE
// Used only for GenTree::GetVtableForOper()
GenTreeIndir() : GenTreeOp()
{
}
#else
// Used by XARCH codegen to construct temporary trees to pass to the emitter.
GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
{
}
#endif
};
// gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
//
// This is the base type for all of the nodes that represent block or struct
// values.
// Since it can be a store, it includes gtBlkOpKind to specify the type of
// code generation that will be used for the block operation.
struct GenTreeBlk : public GenTreeIndir
{
private:
ClassLayout* m_layout;
public:
ClassLayout* GetLayout() const
{
return m_layout;
}
void SetLayout(ClassLayout* layout)
{
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
m_layout = layout;
}
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
return gtOp2;
}
void SetData(GenTree* dataNode)
{
gtOp2 = dataNode;
}
// The size of the buffer to be copied.
unsigned Size() const
{
assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
enum
{
BlkOpKindInvalid,
#ifndef TARGET_X86
BlkOpKindHelper,
#endif
#ifdef TARGET_XARCH
BlkOpKindRepInstr,
#endif
BlkOpKindUnroll,
} gtBlkOpKind;
#ifndef JIT32_GCENCODER
bool gtBlkOpGcUnsafe;
#endif
#ifdef TARGET_XARCH
bool IsOnHeapAndContainsReferences()
{
return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIsLocalAddr();
}
#endif
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
, m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
#ifndef JIT32_GCENCODER
, gtBlkOpGcUnsafe(false)
#endif
{
assert(OperIsBlk(oper));
assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeBlk() : GenTreeIndir()
{
}
#endif // DEBUGGABLE_GENTREE
};
// gtObj -- 'object' (GT_OBJ).
//
// This node is used for block values that may have GC pointers.
struct GenTreeObj : public GenTreeBlk
{
void Init()
{
// By default, an OBJ is assumed to be a global reference, unless it is local.
GenTreeLclVarCommon* lcl = Addr()->IsLocalAddrExpr();
if ((lcl == nullptr) || ((lcl->gtFlags & GTF_GLOB_EFFECT) != 0))
{
gtFlags |= GTF_GLOB_REF;
}
noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
#if DEBUGGABLE_GENTREE
GenTreeObj() : GenTreeBlk()
{
}
#endif
};
// GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
//
// This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
// IL instructions are implemented with it. Note that such stores assume the input has no GC
// pointers in it, and as such do not ever use write barriers.
//
// The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
// the zero constant/INIT_VAL for "initblk".
//
struct GenTreeStoreDynBlk : public GenTreeBlk
{
public:
GenTree* gtDynamicSize;
GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
: GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
{
// Conservatively the 'dstAddr' could be null or point into the global heap.
// Likewise, this is a store and so must be marked with the GTF_ASG flag.
gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
GenTreeStoreDynBlk() : GenTreeBlk()
{
}
#endif // DEBUGGABLE_GENTREE
};
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus
{
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
// Default status unless modified by IsRMWMemOpRootedAtStoreInd()
// One of these denote storeind is a RMW memory operation.
STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
// One of these denote the reason for storeind is marked as non-RMW operation
STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
};
#ifdef DEBUG
inline const char* RMWStatusDescription(RMWStatus status)
{
switch (status)
{
case STOREIND_RMW_STATUS_UNKNOWN:
return "RMW status unknown";
case STOREIND_RMW_DST_IS_OP1:
return "dst candidate is op1";
case STOREIND_RMW_DST_IS_OP2:
return "dst candidate is op2";
case STOREIND_RMW_UNSUPPORTED_ADDR:
return "address mode is not supported";
case STOREIND_RMW_UNSUPPORTED_OPER:
return "oper is not supported";
case STOREIND_RMW_UNSUPPORTED_TYPE:
return "type is not supported";
case STOREIND_RMW_INDIR_UNEQUAL:
return "read indir is not equivalent to write indir";
default:
unreached();
}
}
#endif
// StoreInd is just a BinOp, with additional RMW status
struct GenTreeStoreInd : public GenTreeIndir
{
#if !CPU_LOAD_STORE_ARCH
// The below flag is set and used during lowering
RMWStatus gtRMWStatus;
bool IsRMWStatusUnknown()
{
return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
}
bool IsNonRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
}
bool IsRMWMemoryOp()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
bool IsRMWDstOp1()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
}
bool IsRMWDstOp2()
{
return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
}
#endif //! CPU_LOAD_STORE_ARCH
RMWStatus GetRMWStatus()
{
#if !CPU_LOAD_STORE_ARCH
return gtRMWStatus;
#else
return STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatusDefault()
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
#endif
}
void SetRMWStatus(RMWStatus status)
{
#if !CPU_LOAD_STORE_ARCH
gtRMWStatus = status;
#endif
}
GenTree*& Data()
{
return gtOp2;
}
GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
{
SetRMWStatusDefault();
}
#if DEBUGGABLE_GENTREE
protected:
friend GenTree;
// Used only for GenTree::GetVtableForOper()
GenTreeStoreInd() : GenTreeIndir()
{
SetRMWStatusDefault();
}
#endif
};
/* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
struct GenTreeRetExpr : public GenTree
{
GenTree* gtInlineCandidate;
BasicBlockFlags bbFlags;
CORINFO_CLASS_HANDLE gtRetClsHnd;
GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
{
}
#if DEBUGGABLE_GENTREE
GenTreeRetExpr() : GenTree()
{
}
#endif
};
// In LIR there are no longer any statements, so debug information is inserted linearly using these nodes.
struct GenTreeILOffset : public GenTree
{
DebugInfo gtStmtDI; // debug info
#ifdef DEBUG
IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
#endif
GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
: GenTree(GT_IL_OFFSET, TYP_VOID)
, gtStmtDI(di)
#ifdef DEBUG
, gtStmtLastILoffs(lastOffset)
#endif
{
}
#if DEBUGGABLE_GENTREE
GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
{
}
#endif
};
// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
// using range-based `for`, normally used via Statement::TreeList(), e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
class GenTreeList
{
GenTree* m_trees;
// Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
//
class iterator
{
GenTree* m_tree;
public:
iterator(GenTree* tree) : m_tree(tree)
{
}
GenTree* operator*() const
{
return m_tree;
}
iterator& operator++()
{
m_tree = m_tree->gtNext;
return *this;
}
bool operator!=(const iterator& i) const
{
return m_tree != i.m_tree;
}
};
public:
GenTreeList(GenTree* trees) : m_trees(trees)
{
}
iterator begin() const
{
return iterator(m_trees);
}
iterator end() const
{
return iterator(nullptr);
}
};
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
struct Statement
{
public:
Statement(GenTree* expr DEBUGARG(unsigned stmtID))
: m_rootNode(expr)
, m_treeList(nullptr)
, m_next(nullptr)
, m_prev(nullptr)
#ifdef DEBUG
, m_lastILOffset(BAD_IL_OFFSET)
, m_stmtID(stmtID)
#endif
{
}
GenTree* GetRootNode() const
{
return m_rootNode;
}
GenTree** GetRootNodePointer()
{
return &m_rootNode;
}
void SetRootNode(GenTree* treeRoot)
{
m_rootNode = treeRoot;
}
GenTree* GetTreeList() const
{
return m_treeList;
}
void SetTreeList(GenTree* treeHead)
{
m_treeList = treeHead;
}
// TreeList: convenience method for enabling range-based `for` iteration over the
// execution order of the GenTree linked list, e.g.:
// for (GenTree* const tree : stmt->TreeList()) ...
//
GenTreeList TreeList() const
{
return GenTreeList(GetTreeList());
}
const DebugInfo& GetDebugInfo() const
{
return m_debugInfo;
}
void SetDebugInfo(const DebugInfo& di)
{
m_debugInfo = di;
di.Validate();
}
#ifdef DEBUG
IL_OFFSET GetLastILOffset() const
{
return m_lastILOffset;
}
void SetLastILOffset(IL_OFFSET lastILOffset)
{
m_lastILOffset = lastILOffset;
}
unsigned GetID() const
{
return m_stmtID;
}
#endif // DEBUG
Statement* GetNextStmt() const
{
return m_next;
}
void SetNextStmt(Statement* nextStmt)
{
m_next = nextStmt;
}
Statement* GetPrevStmt() const
{
return m_prev;
}
void SetPrevStmt(Statement* prevStmt)
{
m_prev = prevStmt;
}
bool IsPhiDefnStmt() const
{
return m_rootNode->IsPhiDefn();
}
unsigned char GetCostSz() const
{
return m_rootNode->GetCostSz();
}
unsigned char GetCostEx() const
{
return m_rootNode->GetCostEx();
}
private:
// The root of the expression tree.
// Note: It will be the last node in evaluation order.
GenTree* m_rootNode;
// The tree list head (for forward walks in evaluation order).
// The value is `nullptr` until we have set the sequencing of the nodes.
GenTree* m_treeList;
// The statement nodes are doubly-linked. The first statement node in a block points
// to the last node in the block via its `m_prev` link. Note that the last statement node
// does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
Statement* m_next;
Statement* m_prev;
DebugInfo m_debugInfo;
#ifdef DEBUG
IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
unsigned m_stmtID;
#endif
};
// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
// normally used via BasicBlock::Statements(), e.g.:
// for (Statement* const stmt : block->Statements()) ...
// or:
// for (Statement* const stmt : block->NonPhiStatements()) ...
//
class StatementList
{
Statement* m_stmts;
// Forward iterator for the statement linked list.
//
class iterator
{
Statement* m_stmt;
public:
iterator(Statement* stmt) : m_stmt(stmt)
{
}
Statement* operator*() const
{
return m_stmt;
}
iterator& operator++()
{
m_stmt = m_stmt->GetNextStmt();
return *this;
}
bool operator!=(const iterator& i) const
{
return m_stmt != i.m_stmt;
}
};
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
iterator begin() const
{
return iterator(m_stmts);
}
iterator end() const
{
return iterator(nullptr);
}
};
/* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
pointers) must be flagged as 'large' in GenTree::InitNodeSize().
*/
/* AsClsVar() -- 'static data member' (GT_CLS_VAR) */
struct GenTreeClsVar : public GenTree
{
CORINFO_FIELD_HANDLE gtClsVarHnd;
FieldSeqNode* gtFieldSeq;
GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(GT_CLS_VAR, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
gtFlags |= GTF_GLOB_REF;
}
GenTreeClsVar(genTreeOps oper, var_types type, CORINFO_FIELD_HANDLE clsVarHnd, FieldSeqNode* fldSeq)
: GenTree(oper, type), gtClsVarHnd(clsVarHnd), gtFieldSeq(fldSeq)
{
assert((oper == GT_CLS_VAR) || (oper == GT_CLS_VAR_ADDR));
gtFlags |= GTF_GLOB_REF;
}
#if DEBUGGABLE_GENTREE
GenTreeClsVar() : GenTree()
{
}
#endif
};
/* gtArgPlace -- 'register argument placeholder' (GT_ARGPLACE) */
struct GenTreeArgPlace : public GenTree
{
CORINFO_CLASS_HANDLE gtArgPlaceClsHnd; // Needed when we have a TYP_STRUCT argument
GenTreeArgPlace(var_types type, CORINFO_CLASS_HANDLE clsHnd) : GenTree(GT_ARGPLACE, type), gtArgPlaceClsHnd(clsHnd)
{
}
#if DEBUGGABLE_GENTREE
GenTreeArgPlace() : GenTree()
{
}
#endif
};
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
{
BasicBlock* gtPredBB;
GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
: GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
{
SetSsaNum(ssaNum);
}
#if DEBUGGABLE_GENTREE
GenTreePhiArg() : GenTreeLclVarCommon()
{
}
#endif
};
/* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
struct GenTreePutArgStk : public GenTreeUnOp
{
private:
unsigned m_byteOffset;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
#endif
public:
#if defined(DEBUG_ARG_SLOTS)
unsigned gtSlotNum; // Slot number of the argument to be passed on stack
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#endif
#endif
#if defined(UNIX_X86_ABI)
unsigned gtPadAlign; // Number of padding slots for stack alignment
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
GenTreeCall* gtCall; // the call node to which this argument belongs
#endif
#if FEATURE_FASTTAILCALL
bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
// By default this is false and will be placed in out-going arg area.
// Fast tail calls set this to true.
// In future if we need to add more such bool fields consider bit fields.
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
// block node.
enum class Kind : __int8{
Invalid, RepInstr, PartialRepInstr, Unroll, Push,
};
Kind gtPutArgStkKind;
#endif
GenTreePutArgStk(genTreeOps oper,
var_types type,
GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
GenTreeCall* callNode,
bool putInIncomingArgArea)
: GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
, m_byteOffset(stackByteOffset)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, m_byteSize(stackByteSize)
#endif
#if defined(DEBUG_ARG_SLOTS)
, gtSlotNum(slotNum)
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtNumSlots(numSlots)
#endif
#endif
#if defined(UNIX_X86_ABI)
, gtPadAlign(0)
#endif
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
#endif
#if FEATURE_FASTTAILCALL
, gtPutInIncomingArgArea(putInIncomingArgArea)
#endif // FEATURE_FASTTAILCALL
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
, gtPutArgStkKind(Kind::Invalid)
#endif
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset == slotNum * TARGET_POINTER_SIZE);
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
DEBUG_ARG_SLOTS_ASSERT(m_byteSize == gtNumSlots * TARGET_POINTER_SIZE);
#endif
}
GenTree*& Data()
{
return gtOp1;
}
#if FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return gtPutInIncomingArgArea;
}
#else // !FEATURE_FASTTAILCALL
bool putInIncomingArgArea() const
{
return false;
}
#endif // !FEATURE_FASTTAILCALL
unsigned getArgOffset() const
{
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset / TARGET_POINTER_SIZE == gtSlotNum);
DEBUG_ARG_SLOTS_ASSERT(m_byteOffset % TARGET_POINTER_SIZE == 0);
return m_byteOffset;
}
#if defined(UNIX_X86_ABI)
unsigned getArgPadding() const
{
return gtPadAlign;
}
void setArgPadding(unsigned padAlign)
{
gtPadAlign = padAlign;
}
#endif
#ifdef FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const
{
return m_byteSize;
}
// Return true if this is a PutArgStk of a SIMD12 struct.
// This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
unsigned isSIMD12() const
{
return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
}
bool isPushKind() const
{
return gtPutArgStkKind == Kind::Push;
}
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned GetStackByteSize() const;
#endif // !FEATURE_PUT_STRUCT_ARG_STK
#if DEBUGGABLE_GENTREE
GenTreePutArgStk() : GenTreeUnOp()
{
}
#endif
};
#if FEATURE_ARG_SPLIT
// Represent the struct argument: split value in register(s) and stack
struct GenTreePutArgSplit : public GenTreePutArgStk
{
unsigned gtNumRegs;
GenTreePutArgSplit(GenTree* op1,
unsigned stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
unsigned slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
unsigned numSlots,
#endif
#endif
unsigned numRegs,
GenTreeCall* callNode,
bool putIncomingArgArea)
: GenTreePutArgStk(GT_PUTARG_SPLIT,
TYP_STRUCT,
op1,
stackByteOffset,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
stackByteSize,
#endif
#if defined(DEBUG_ARG_SLOTS)
slotNum,
#if defined(FEATURE_PUT_STRUCT_ARG_STK)
numSlots,
#endif
#endif
callNode,
putIncomingArgArea)
, gtNumRegs(numRegs)
{
ClearOtherRegs();
ClearOtherRegFlags();
}
// Type required to support multi-reg struct arg.
var_types m_regType[MAX_REG_ARG];
// First reg of struct is always given by GetRegNum().
// gtOtherRegs holds the other reg numbers of struct.
regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
MultiRegSpillFlags gtSpillFlags;
//---------------------------------------------------------------------------
// GetRegNumByIdx: get i'th register allocated to this struct argument.
//
// Arguments:
// idx - index of the struct
//
// Return Value:
// Return regNumber of i'th register of this struct argument
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
return GetRegNum();
}
return (regNumber)gtOtherRegs[idx - 1];
}
//----------------------------------------------------------------------
// SetRegNumByIdx: set i'th register of this struct argument
//
// Arguments:
// reg - reg number
// idx - index of the struct
//
// Return Value:
// None
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_REG_ARG);
if (idx == 0)
{
SetRegNum(reg);
}
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
}
//----------------------------------------------------------------------------
// ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
}
GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
{
return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
}
void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
{
#if FEATURE_MULTIREG_RET
gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
#endif
}
//--------------------------------------------------------------------------
// GetRegType: Get var_type of the register specified by index.
//
// Arguments:
// index - Index of the register.
// First register will have an index 0 and so on.
//
// Return Value:
// var_type of the register specified by its index.
var_types GetRegType(unsigned index) const
{
assert(index < gtNumRegs);
var_types result = m_regType[index];
return result;
}
//-------------------------------------------------------------------
// clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegFlags()
{
gtSpillFlags = 0;
}
#if DEBUGGABLE_GENTREE
GenTreePutArgSplit() : GenTreePutArgStk()
{
}
#endif
};
#endif // FEATURE_ARG_SPLIT
// Represents GT_COPY or GT_RELOAD node
//
// As it turns out, these are only needed on targets that happen to have multi-reg returns.
// However, they are actually needed on any target that has any multi-reg ops. It is just
// coincidence that those are the same (and there isn't a FEATURE_MULTIREG_OPS).
//
struct GenTreeCopyOrReload : public GenTreeUnOp
{
#if FEATURE_MULTIREG_RET
// State required to support copy/reload of a multi-reg call node.
// The first register is always given by GetRegNum().
//
regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
#endif
//----------------------------------------------------------
// ClearOtherRegs: set gtOtherRegs to REG_NA.
//
// Arguments:
// None
//
// Return Value:
// None
//
void ClearOtherRegs()
{
#if FEATURE_MULTIREG_RET
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = REG_NA;
}
#endif
}
//-----------------------------------------------------------
// GetRegNumByIdx: Get regNumber of i'th position.
//
// Arguments:
// idx - register position.
//
// Return Value:
// Returns regNumber assigned to i'th position.
//
regNumber GetRegNumByIdx(unsigned idx) const
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
return (regNumber)gtOtherRegs[idx - 1];
#else
return REG_NA;
#endif
}
//-----------------------------------------------------------
// SetRegNumByIdx: Set the regNumber for i'th position.
//
// Arguments:
// reg - reg number
// idx - register position.
//
// Return Value:
// None.
//
void SetRegNumByIdx(regNumber reg, unsigned idx)
{
assert(idx < MAX_RET_REG_COUNT);
if (idx == 0)
{
SetRegNum(reg);
}
#if FEATURE_MULTIREG_RET
else
{
gtOtherRegs[idx - 1] = (regNumberSmall)reg;
assert(gtOtherRegs[idx - 1] == reg);
}
#else
else
{
unreached();
}
#endif
}
//----------------------------------------------------------------------------
// CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
// node.
//
// Arguments:
// from - GenTree node from which to copy multi-reg state
//
// Return Value:
// None
//
// TODO-ARM: Implement this routine for Arm64 and Arm32
// TODO-X86: Implement this routine for x86
void CopyOtherRegs(GenTreeCopyOrReload* from)
{
assert(OperGet() == from->OperGet());
#ifdef UNIX_AMD64_ABI
for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
{
gtOtherRegs[i] = from->gtOtherRegs[i];
}
#endif
}
unsigned GetRegCount() const
{
#if FEATURE_MULTIREG_RET
// We need to return the highest index for which we have a valid register.
// Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
// If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
// Note that for most nodes, the set of valid registers must be contiguous,
// but for COPY or RELOAD there is only a valid register for the register positions
// that must be copied or reloaded.
//
for (unsigned i = MAX_RET_REG_COUNT; i > 1; i--)
{
if (gtOtherRegs[i - 2] != REG_NA)
{
return i;
}
}
#endif
// We should never have a COPY or RELOAD with no valid registers.
assert(GetRegNum() != REG_NA);
return 1;
}
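// Example (illustrative, assuming a target with at least three return register positions): for a
// GT_COPY where only positions 0 and 2 need copying, gtOtherRegs[0] is REG_NA while gtOtherRegs[1]
// holds a valid register, so the loop above returns 3 (the highest valid position plus one).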
GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
{
assert(type != TYP_STRUCT || op1->IsMultiRegNode());
SetRegNum(REG_NA);
ClearOtherRegs();
}
#if DEBUGGABLE_GENTREE
GenTreeCopyOrReload() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_ALLOCOBJ node
struct GenTreeAllocObj final : public GenTreeUnOp
{
unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
bool gtHelperHasSideEffects;
CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
#ifdef FEATURE_READYTORUN
CORINFO_CONST_LOOKUP gtEntryPoint;
#endif
GenTreeAllocObj(
var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
: GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
, // This node in most cases will be changed to a call node
gtNewHelper(helper)
, gtHelperHasSideEffects(helperHasSideEffects)
, gtAllocObjClsHnd(clsHnd)
{
#ifdef FEATURE_READYTORUN
gtEntryPoint.addr = nullptr;
#endif
}
#if DEBUGGABLE_GENTREE
GenTreeAllocObj() : GenTreeUnOp()
{
}
#endif
};
// Represents GT_RUNTIMELOOKUP node
struct GenTreeRuntimeLookup final : public GenTreeUnOp
{
CORINFO_GENERIC_HANDLE gtHnd;
CorInfoGenericHandleType gtHndType;
GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
: GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
{
assert(hnd != nullptr);
}
#if DEBUGGABLE_GENTREE
GenTreeRuntimeLookup() : GenTreeUnOp()
{
}
#endif
// Return reference to the actual tree that does the lookup
GenTree*& Lookup()
{
return gtOp1;
}
bool IsClassHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_CLASS;
}
bool IsMethodHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_METHOD;
}
bool IsFieldHandle() const
{
return gtHndType == CORINFO_HANDLETYPE_FIELD;
}
// Note these operations describe the handle that is input to the
// lookup, not the handle produced by the lookup.
CORINFO_CLASS_HANDLE GetClassHandle() const
{
assert(IsClassHandle());
return (CORINFO_CLASS_HANDLE)gtHnd;
}
CORINFO_METHOD_HANDLE GetMethodHandle() const
{
assert(IsMethodHandle());
return (CORINFO_METHOD_HANDLE)gtHnd;
}
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
assert(IsFieldHandle());
return (CORINFO_FIELD_HANDLE)gtHnd;
}
};
// Represents the condition of a GT_JCC or GT_SETCC node.
struct GenCondition
{
// clang-format off
enum Code : unsigned char
{
OperMask = 7,
Unsigned = 8,
Unordered = Unsigned,
Float = 16,
// 0 would be the encoding of "signed EQ" but since equality is sign insensitive
// we'll use 0 as invalid/uninitialized condition code. This will also leave 1
// as a spare code.
NONE = 0,
SLT = 2,
SLE = 3,
SGE = 4,
SGT = 5,
S = 6,
NS = 7,
EQ = Unsigned | 0, // = 8
NE = Unsigned | 1, // = 9
ULT = Unsigned | SLT, // = 10
ULE = Unsigned | SLE, // = 11
UGE = Unsigned | SGE, // = 12
UGT = Unsigned | SGT, // = 13
C = Unsigned | S, // = 14
NC = Unsigned | NS, // = 15
FEQ = Float | 0, // = 16
FNE = Float | 1, // = 17
FLT = Float | SLT, // = 18
FLE = Float | SLE, // = 19
FGE = Float | SGE, // = 20
FGT = Float | SGT, // = 21
O = Float | S, // = 22
NO = Float | NS, // = 23
FEQU = Unordered | FEQ, // = 24
FNEU = Unordered | FNE, // = 25
FLTU = Unordered | FLT, // = 26
FLEU = Unordered | FLE, // = 27
FGEU = Unordered | FGE, // = 28
FGTU = Unordered | FGT, // = 29
P = Unordered | O, // = 30
NP = Unordered | NO, // = 31
};
// clang-format on
private:
Code m_code;
public:
Code GetCode() const
{
return m_code;
}
bool IsFlag() const
{
return (m_code & OperMask) >= S;
}
bool IsUnsigned() const
{
return (ULT <= m_code) && (m_code <= UGT);
}
bool IsFloat() const
{
return !IsFlag() && (m_code & Float) != 0;
}
bool IsUnordered() const
{
return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
}
bool Is(Code cond) const
{
return m_code == cond;
}
template <typename... TRest>
bool Is(Code c, TRest... rest) const
{
return Is(c) || Is(rest...);
}
// Indicate whether the condition should be swapped in order to avoid generating
// multiple branches. This happens for certain floating point conditions on XARCH,
// see GenConditionDesc and its associated mapping table for more details.
bool PreferSwap() const
{
#ifdef TARGET_XARCH
return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
#else
return false;
#endif
}
const char* Name() const
{
// clang-format off
static const char* names[]
{
"NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
"UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
"FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
"FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
};
// clang-format on
assert(m_code < ArrLen(names));
return names[m_code];
}
GenCondition() : m_code()
{
}
GenCondition(Code cond) : m_code(cond)
{
}
static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
static GenCondition FromRelop(GenTree* relop)
{
assert(relop->OperIsCompare());
if (varTypeIsFloating(relop->gtGetOp1()))
{
return FromFloatRelop(relop);
}
else
{
return FromIntegralRelop(relop);
}
}
static GenCondition FromFloatRelop(GenTree* relop)
{
assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
}
static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
{
assert(GenTree::OperIsCompare(oper));
unsigned code = oper - GT_EQ;
assert(code <= SGT);
code |= Float;
if (isUnordered)
{
code |= Unordered;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition FromIntegralRelop(GenTree* relop)
{
assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
}
static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
{
assert(GenTree::OperIsCompare(oper));
// GT_TEST_EQ/NE are special, they need to be mapped as GT_EQ/NE
unsigned code = oper - ((oper >= GT_TEST_EQ) ? GT_TEST_EQ : GT_EQ);
if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
{
code |= Unsigned;
}
return GenCondition(static_cast<Code>(code));
}
static GenCondition Reverse(GenCondition condition)
{
// clang-format off
static const Code reverse[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
NE, EQ, UGE, UGT, ULT, ULE, NC, C,
FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
FNE, FEQ, FGE, FGT, FLT, FLE, NP, P
};
// clang-format on
assert(condition.m_code < ArrLen(reverse));
return GenCondition(reverse[condition.m_code]);
}
static GenCondition Swap(GenCondition condition)
{
// clang-format off
static const Code swap[]
{
// EQ NE LT LE GE GT F NF
NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
EQ, NE, UGT, UGE, ULE, ULT, C, NC,
FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
};
// clang-format on
assert(condition.m_code < ArrLen(swap));
return GenCondition(swap[condition.m_code]);
}
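// Example (illustrative): Reverse(SLT) yields SGE (the logical negation of the relation), while
// Swap(SLT) yields SGT (the same relation with its operands exchanged); for float conditions,
// Reverse also toggles the Unordered bit, e.g. Reverse(FGT) yields FLEU.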
};
// Represents a GT_JCC or GT_SETCC node.
struct GenTreeCC final : public GenTree
{
GenCondition gtCondition;
GenTreeCC(genTreeOps oper, GenCondition condition, var_types type = TYP_VOID)
: GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
{
assert(OperIs(GT_JCC, GT_SETCC));
}
#if DEBUGGABLE_GENTREE
GenTreeCC() : GenTree()
{
}
#endif // DEBUGGABLE_GENTREE
};
//------------------------------------------------------------------------
// Deferred inline functions of GenTree -- these need the subtypes above to
// be defined already.
//------------------------------------------------------------------------
inline bool GenTree::OperIsBlkOp()
{
return ((gtOper == GT_ASG) && varTypeIsStruct(AsOp()->gtOp1)) || OperIsStoreBlk();
}
inline bool GenTree::OperIsInitBlkOp()
{
if (!OperIsBlkOp())
{
return false;
}
GenTree* src;
if (gtOper == GT_ASG)
{
src = gtGetOp2();
}
else
{
src = AsBlk()->Data()->gtSkipReloadOrCopy();
}
return src->OperIsInitVal() || src->OperIsConst();
}
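// Example (illustrative): ASG(BLK(addr), INIT_VAL(b)) and ASG(structLcl, CNS_INT 0) are init
// block ops, while ASG(structLcl, OBJ(addr)) is a copy block op (see OperIsCopyBlkOp below).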
inline bool GenTree::OperIsCopyBlkOp()
{
return OperIsBlkOp() && !OperIsInitBlkOp();
}
//------------------------------------------------------------------------
// IsFPZero: Checks whether this is a floating point constant with value 0.0
//
// Return Value:
// Returns true iff the tree is a GT_CNS_DBL with value 0.0.
inline bool GenTree::IsFPZero() const
{
if ((gtOper == GT_CNS_DBL) && (AsDblCon()->gtDconVal == 0.0))
{
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
// constVal - the value of interest
//
// Return Value:
// Returns true iff the tree is an integral constant opcode, with
// the given value.
//
// Notes:
// Like gtIconVal, the argument is of type ssize_t, so it cannot check for
// long constants in a target-independent way.
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
{
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
{
return true;
}
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
{
return true;
}
return false;
}
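// Example (illustrative): IsIntegralConst(0) is true for a GT_CNS_INT node holding 0 and, on
// 32-bit targets, for a GT_CNS_LNG node holding 0; it is false for a GT_CNS_DBL holding 0.0.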
//-------------------------------------------------------------------
// IsIntegralConstVector: returns true if this is a SIMD vector
// with all its elements equal to an integral constant.
//
// Arguments:
// constVal - const value of vector element
//
// Returns:
// True if this represents an integral const SIMD vector.
//
inline bool GenTree::IsIntegralConstVector(ssize_t constVal) const
{
#ifdef FEATURE_SIMD
// SIMDIntrinsicInit intrinsic with a const value as initializer
// represents a const vector.
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit) &&
AsSIMD()->Op(1)->IsIntegralConst(constVal))
{
assert(varTypeIsIntegral(AsSIMD()->GetSimdBaseType()));
assert(AsSIMD()->GetOperandCount() == 1);
return true;
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
if (!varTypeIsIntegral(node->GetSimdBaseType()))
{
// Can't be an integral constant
return false;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
if ((node->GetOperandCount() == 0) && (constVal == 0))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
else if ((node->GetOperandCount() == 1) && node->Op(1)->IsIntegralConst(constVal))
{
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_Create) || (intrinsicId == NI_Vector256_Create);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_Create) || (intrinsicId == NI_Vector128_Create);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
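// Example (illustrative): a GT_HWINTRINSIC Vector128_Create whose single operand is CNS_INT 5
// satisfies IsIntegralConstVector(5) when its base type is integral, and Vector128_get_Zero with
// an integral base type satisfies IsIntegralConstVector(0); float-typed vectors never qualify.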
//-------------------------------------------------------------------
// IsSIMDZero: returns true if this is a SIMD vector
// with all its elements equal to zero.
//
// Returns:
// True if this represents a SIMD vector with all its elements equal to zero.
//
inline bool GenTree::IsSIMDZero() const
{
#ifdef FEATURE_SIMD
if ((gtOper == GT_SIMD) && (AsSIMD()->GetSIMDIntrinsicId() == SIMDIntrinsicInit))
{
return (AsSIMD()->Op(1)->IsIntegralConst(0) || AsSIMD()->Op(1)->IsFPZero());
}
#endif
return false;
}
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Returns:
// True if this represents a const floating-point value of exactly positive zero (+0.0).
// Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
{
if (IsCnsFltOrDbl())
{
// This implementation is almost identical to IsCnsNonZeroFltOrDbl
// but it is easier to parse out
// rather than using !IsCnsNonZeroFltOrDbl.
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue == 0;
}
return false;
}
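// Example (illustrative): +0.0 has an all-zero bit pattern, so the comparison above returns true;
// -0.0 has its sign bit set and therefore returns false, as does every other nonzero value.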
//-------------------------------------------------------------------
// IsVectorZero: returns true if this node is a HWIntrinsic that is Vector*_get_Zero.
//
// Returns:
// True if this represents a HWIntrinsic node that is Vector*_get_Zero.
//
// TODO: We already have IsSIMDZero() and IsIntegralConstVector(0),
// however, IsSIMDZero() does not cover hardware intrinsics, and IsIntegralConstVector(0) does not cover floating
// point. In order to not risk adverse behaviour by modifying those, this function 'IsVectorZero' was introduced.
// At some point, it makes sense to normalize this logic to be a single function call rather than have several
// separate ones; preferably this one.
inline bool GenTree::IsVectorZero() const
{
#ifdef FEATURE_HW_INTRINSICS
if (gtOper == GT_HWINTRINSIC)
{
const GenTreeHWIntrinsic* node = AsHWIntrinsic();
const NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
return (intrinsicId == NI_Vector128_get_Zero) || (intrinsicId == NI_Vector256_get_Zero);
#elif defined(TARGET_ARM64)
return (intrinsicId == NI_Vector64_get_Zero) || (intrinsicId == NI_Vector128_get_Zero);
#endif // !TARGET_XARCH && !TARGET_ARM64
}
#endif // FEATURE_HW_INTRINSICS
return false;
}
inline bool GenTree::IsBoxedValue()
{
assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
}
#ifdef DEBUG
//------------------------------------------------------------------------
// IsValidCallArgument: Given an GenTree node that represents an argument
// enforce (or don't enforce) the following invariant.
//
// Arguments:
// instance method for a GenTree node
//
// Return values:
// true: the GenTree node is accepted as a valid argument
// false: the GenTree node is not accepted as a valid argument
//
// Notes:
// For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
//
// Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
// allowed but every element must be a GT_LCL_FLD.
//
// For the future targets that allow for Multireg args (and this includes the current ARM64 target),
// or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
// These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
// but could be changed into constants or GT_COMMA trees by the later
// optimization phases.
inline bool GenTree::IsValidCallArgument()
{
if (OperIs(GT_FIELD_LIST))
{
#if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
return false;
#else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
// We allow this GT_FIELD_LIST as an argument
return true;
#endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
}
// We don't have either kind of list, so it satisfies the invariant.
return true;
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp1() const
{
return AsOp()->gtOp1;
}
#ifdef DEBUG
/* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
{
switch (oper)
{
case GT_ADD:
case GT_SUB:
case GT_MUL:
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
case GT_OR:
case GT_XOR:
case GT_AND:
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
case GT_INDEX:
case GT_ASG:
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_COMMA:
case GT_QMARK:
case GT_COLON:
case GT_MKREFANY:
return true;
default:
return false;
}
}
#endif // DEBUG
inline GenTree* GenTree::gtGetOp2() const
{
assert(OperIsBinary());
GenTree* op2 = AsOp()->gtOp2;
// Only allow null op2 if the node type allows it, e.g. GT_LEA.
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtGetOp2IfPresent() const
{
/* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
// This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
// This helps prefix in its analysis of code which calls gtGetOp2()
assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
return op2;
}
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
effectiveVal = effectiveVal->AsOp()->gtOp1;
}
else
{
return effectiveVal;
}
}
}
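// Example (illustrative): for COMMA(sideEffect, COMMA(sideEffect2, LCL_VAR V01)), gtEffectiveVal
// returns the LCL_VAR; with commaOnly == false it also looks through a NOP that wraps an operand.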
//-------------------------------------------------------------------------
// gtCommaAssignVal - find the value being assigned in a comma-wrapped assignment
//
// Returns:
// tree representing value being assigned if this tree represents a
// comma-wrapped local definition and use.
//
// The original tree, if not.
//
inline GenTree* GenTree::gtCommaAssignVal()
{
GenTree* result = this;
if (OperIs(GT_COMMA))
{
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_ASG))
{
GenTree* asgOp1 = commaOp1->AsOp()->gtOp1;
GenTree* asgOp2 = commaOp1->AsOp()->gtOp2;
if (asgOp1->OperIs(GT_LCL_VAR) && (asgOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
{
result = asgOp2;
}
}
}
return result;
}
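// Example (illustrative): for COMMA(ASG(LCL_VAR V05, someExpr), LCL_VAR V05) this returns
// someExpr; any other shape (different locals, no ASG, not a COMMA) returns the tree itself.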
//-------------------------------------------------------------------------
// gtSkipPutArgType - skip PUTARG_TYPE if it is presented.
//
// Returns:
// the original tree or its child if it was a PUTARG_TYPE.
//
// Notes:
// PUTARG_TYPE should be skipped when we are doing transformations
// that are not affected by ABI, for example: inlining, implicit byref morphing.
//
inline GenTree* GenTree::gtSkipPutArgType()
{
if (OperIs(GT_PUTARG_TYPE))
{
GenTree* res = AsUnOp()->gtGetOp1();
assert(!res->OperIs(GT_PUTARG_TYPE));
return res;
}
return this;
}
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
if (gtOper == GT_RELOAD || gtOper == GT_COPY)
{
assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
return gtGetOp1();
}
return this;
}
//-----------------------------------------------------------------------------------
// IsMultiRegCall: whether a call node returns its value in more than one register
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register returning call
//
inline bool GenTree::IsMultiRegCall() const
{
if (this->IsCall())
{
return AsCall()->HasMultiRegRetVal();
}
return false;
}
//-----------------------------------------------------------------------------------
// IsMultiRegLclVar: whether a local var node defines multiple registers
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a multi register defining local var
//
inline bool GenTree::IsMultiRegLclVar() const
{
if (OperIsScalarLocal())
{
return AsLclVar()->IsMultiReg();
}
return false;
}
//-----------------------------------------------------------------------------------
// GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
//
// Arguments:
// regIndex - which register to return (must be 0 for non-multireg nodes)
//
// Return Value:
// The register, if any, assigned to this index for this node.
//
// Notes:
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline regNumber GenTree::GetRegByIndex(int regIndex) const
{
if (regIndex == 0)
{
return GetRegNum();
}
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegNumByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegNumByIdx(regIndex);
}
#endif
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegNumByIdx(regIndex);
}
#endif
if (OperIs(GT_COPY, GT_RELOAD))
{
return AsCopyOrReload()->GetRegNumByIdx(regIndex);
}
#endif // FEATURE_MULTIREG_RET
#ifdef FEATURE_HW_INTRINSICS
if (OperIs(GT_HWINTRINSIC))
{
assert(regIndex == 1);
// TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
return AsHWIntrinsic()->GetOtherReg();
}
#endif // FEATURE_HW_INTRINSICS
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegNumByIdx(regIndex);
}
assert(!"Invalid regIndex for GetRegFromMultiRegNode");
return REG_NA;
}
//-----------------------------------------------------------------------------------
// GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
// by this multi-reg node.
//
// Arguments:
// regIndex - index of register whose type will be returned
//
// Return Value:
// The register type assigned to this index for this node.
//
// Notes:
// This must be a multireg node that is *not* a copy or reload (which must retrieve the
// type from its source), and 'regIndex' must be a valid index for this node.
//
// All targets that support multi-reg ops of any kind also support multi-reg return
// values for calls. Should that change with a future target, this method will need
// to change accordingly.
//
inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegType(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegType(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsHWIntrinsic())
{
assert(TypeGet() == TYP_STRUCT);
#ifdef TARGET_ARM64
if (AsHWIntrinsic()->GetSimdSize() == 16)
{
return TYP_SIMD16;
}
else
{
assert(AsHWIntrinsic()->GetSimdSize() == 8);
return TYP_SIMD8;
}
#elif defined(TARGET_XARCH)
// At this time, the only multi-reg HW intrinsics all return the type of their
// arguments. If this changes, we will need a way to record or determine this.
return gtGetOp1()->TypeGet();
#endif
}
if (OperIsScalarLocal())
{
if (TypeGet() == TYP_LONG)
{
return TYP_INT;
}
assert(TypeGet() == TYP_STRUCT);
assert((gtFlags & GTF_VAR_MULTIREG) != 0);
// The register type for a multireg lclVar requires looking at the LclVarDsc,
// which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
// on GenTreeLclVar.
assert(!"GetRegTypeByIndex for LclVar");
}
assert(!"Invalid node type for GetRegTypeByIndex");
return TYP_UNDEF;
}
//-----------------------------------------------------------------------------------
// GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
// for this multi-reg node.
//
// Arguments:
// regIndex - which register's spill flags to return
//
// Return Value:
// The spill flags (GTF_SPILL GTF_SPILLED) for this register.
//
// Notes:
// This must be a multireg node and 'regIndex' must be a valid index for this node.
// This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
//
inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
{
#if FEATURE_MULTIREG_RET
if (IsMultiRegCall())
{
return AsCall()->GetRegSpillFlagByIdx(regIndex);
}
#if FEATURE_ARG_SPLIT
if (OperIsPutArgSplit())
{
return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
}
#endif // FEATURE_ARG_SPLIT
#if !defined(TARGET_64BIT)
if (OperIsMultiRegOp())
{
return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
}
#endif // !defined(TARGET_64BIT)
#endif // FEATURE_MULTIREG_RET
if (OperIsScalarLocal())
{
return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
}
assert(!"Invalid node type for GetRegSpillFlagByIdx");
return GTF_EMPTY;
}
//-----------------------------------------------------------------------------------
// GetLastUseBit: Get the last use bit for regIndex
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// The bit to set, clear or query for the last-use of the regIndex'th value.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline GenTreeFlags GenTree::GetLastUseBit(int regIndex) const
{
assert(regIndex < 4);
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
static_assert_no_msg((1 << MULTIREG_LAST_USE_SHIFT) == GTF_VAR_MULTIREG_DEATH0);
return (GenTreeFlags)(1 << (MULTIREG_LAST_USE_SHIFT + regIndex));
}
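// Example (illustrative): GetLastUseBit(0) is GTF_VAR_MULTIREG_DEATH0 and GetLastUseBit(1) is the
// next higher bit, so each of the (at most four) multi-reg positions has its own last-use flag.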
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of the regIndex'th value
//
// Arguments:
// regIndex - the register index
//
// Return Value:
// true iff this is a last use.
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::IsLastUse(int regIndex) const
{
assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_COPY, GT_RELOAD));
return (gtFlags & GetLastUseBit(regIndex)) != 0;
}
//-----------------------------------------------------------------------------------
// IsLastUse: Determine whether this node is a last use of any value
//
// Return Value:
// true iff this has any last uses (i.e. at any index).
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline bool GenTree::HasLastUse() const
{
return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
}
//-----------------------------------------------------------------------------------
// SetLastUse: Set the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::SetLastUse(int regIndex)
{
gtFlags |= GetLastUseBit(regIndex);
}
//-----------------------------------------------------------------------------------
// ClearLastUse: Clear the last use bit for the given index
//
// Arguments:
// regIndex - the register index
//
// Notes:
// This must be a GenTreeLclVar or GenTreeCopyOrReload node.
//
inline void GenTree::ClearLastUse(int regIndex)
{
gtFlags &= ~GetLastUseBit(regIndex);
}
//-------------------------------------------------------------------------
// IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload node.
//
inline bool GenTree::IsCopyOrReload() const
{
return (gtOper == GT_COPY || gtOper == GT_RELOAD);
}
//-----------------------------------------------------------------------------------
// IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
// call node.
//
// Arguments:
// None
//
// Return Value:
// Returns true if this GenTree is a copy or reload of multi-reg call node.
//
inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
{
if (IsCopyOrReload())
{
return gtGetOp1()->IsMultiRegCall();
}
return false;
}
inline bool GenTree::IsCnsIntOrI() const
{
return (gtOper == GT_CNS_INT);
}
inline bool GenTree::IsIntegralConst() const
{
#ifdef TARGET_64BIT
return IsCnsIntOrI();
#else // !TARGET_64BIT
return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
#endif // !TARGET_64BIT
}
//-------------------------------------------------------------------------
// IsIntegralConstPow2: Determines whether an integral constant is
// the power of 2.
//
// Return Value:
// Returns true if the GenTree's integral constant
// is the power of 2.
//
inline bool GenTree::IsIntegralConstPow2() const
{
if (IsIntegralConst())
{
return isPow2(AsIntConCommon()->IntegralValue());
}
return false;
}
//-------------------------------------------------------------------------
// IsIntegralConstUnsignedPow2: Determines whether the unsigned value of
// an integral constant is the power of 2.
//
// Return Value:
// Returns true if the unsigned value of a GenTree's integral constant
// is the power of 2.
//
// Notes:
// Integral constant nodes store their value in signed form.
// This should handle cases where an unsigned-int was logically used in
// user code.
//
inline bool GenTree::IsIntegralConstUnsignedPow2() const
{
if (IsIntegralConst())
{
return isPow2((UINT64)AsIntConCommon()->IntegralValue());
}
return false;
}
//-------------------------------------------------------------------------
// IsIntegralConstAbsPow2: Determines whether the absolute value of
// an integral constant is the power of 2.
//
// Return Value:
// Returns true if the absolute value of a GenTree's integral constant
// is the power of 2.
//
inline bool GenTree::IsIntegralConstAbsPow2() const
{
if (IsIntegralConst())
{
INT64 svalue = AsIntConCommon()->IntegralValue();
size_t value = (svalue == SSIZE_T_MIN) ? static_cast<size_t>(svalue) : static_cast<size_t>(abs(svalue));
return isPow2(value);
}
return false;
}
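// Example (illustrative): IsIntegralConstAbsPow2() is true for -8 (its absolute value, 8, is a
// power of 2); SSIZE_T_MIN is special-cased above because abs() would overflow, and its unsigned
// bit pattern is itself a power of 2, so it also returns true.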
// Is this node an integer constant that fits in a 32-bit signed integer (INT32)
inline bool GenTree::IsIntCnsFitsInI32()
{
#ifdef TARGET_64BIT
return IsCnsIntOrI() && AsIntCon()->FitsInI32();
#else // !TARGET_64BIT
return IsCnsIntOrI();
#endif // !TARGET_64BIT
}
inline bool GenTree::IsCnsFltOrDbl() const
{
return OperGet() == GT_CNS_DBL;
}
inline bool GenTree::IsCnsNonZeroFltOrDbl() const
{
if (OperGet() == GT_CNS_DBL)
{
double constValue = AsDblCon()->gtDconVal;
return *(__int64*)&constValue != 0;
}
return false;
}
inline bool GenTree::IsHelperCall()
{
return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
}
inline var_types GenTree::CastFromType()
{
return this->AsCast()->CastOp()->TypeGet();
}
inline var_types& GenTree::CastToType()
{
return this->AsCast()->gtCastType;
}
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
{
return true;
}
return false;
}
/*****************************************************************************/
#ifndef HOST_64BIT
#include <poppack.h>
#endif
/*****************************************************************************/
const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
const size_t TREE_NODE_SZ_LARGE = sizeof(GenTreeCall);
enum varRefKinds
{
VR_INVARIANT = 0x00, // an invariant value
VR_NONE = 0x00,
VR_IND_REF = 0x01, // an object reference
VR_IND_SCL = 0x02, // a non-object reference
VR_GLB_VAR = 0x04, // a global (clsVar)
};
/*****************************************************************************/
#endif // !GENTREE_H
/*****************************************************************************/
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/gtlist.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// clang-format off
/*****************************************************************************/
#ifndef GTNODE
#error Define GTNODE before including this file.
#endif
/*****************************************************************************/
//
// Node enum
// , GenTree struct flavor
// ,commutative
// ,oper kind | DEBUG oper kind
GTNODE(NONE , char ,0,GTK_SPECIAL)
//-----------------------------------------------------------------------------
// Nodes related to locals:
//-----------------------------------------------------------------------------
GTNODE(PHI , GenTreePhi ,0,GTK_SPECIAL) // phi node for ssa.
GTNODE(PHI_ARG , GenTreePhiArg ,0,GTK_LEAF) // phi(phiarg, phiarg, phiarg)
GTNODE(LCL_VAR , GenTreeLclVar ,0,GTK_LEAF) // local variable
GTNODE(LCL_FLD , GenTreeLclFld ,0,GTK_LEAF) // field in a non-primitive variable
GTNODE(STORE_LCL_VAR , GenTreeLclVar ,0,GTK_UNOP|GTK_NOVALUE) // store to local variable
GTNODE(STORE_LCL_FLD , GenTreeLclFld ,0,GTK_UNOP|GTK_NOVALUE) // store to a part of the variable
GTNODE(LCL_VAR_ADDR , GenTreeLclVar ,0,GTK_LEAF) // address of local variable
GTNODE(LCL_FLD_ADDR , GenTreeLclFld ,0,GTK_LEAF) // address of field in a non-primitive variable
//-----------------------------------------------------------------------------
// Leaf nodes (i.e. these nodes have no sub-operands):
//-----------------------------------------------------------------------------
GTNODE(CATCH_ARG , GenTree ,0,GTK_LEAF) // Exception object in a catch block
GTNODE(LABEL , GenTree ,0,GTK_LEAF) // Jump-target
GTNODE(JMP , GenTreeVal ,0,GTK_LEAF|GTK_NOVALUE) // Jump to another function
GTNODE(FTN_ADDR , GenTreeFptrVal ,0,GTK_LEAF) // Address of a function
GTNODE(RET_EXPR , GenTreeRetExpr ,0,GTK_LEAF|DBK_NOTLIR) // Place holder for the return expression from an inline candidate
GTNODE(CLS_VAR , GenTreeClsVar ,0,GTK_LEAF) // Static data member
GTNODE(ARGPLACE , GenTreeArgPlace ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTLIR) // Placeholder for a "late arg" in the original arg list.
//-----------------------------------------------------------------------------
// Constant nodes:
//-----------------------------------------------------------------------------
GTNODE(CNS_INT , GenTreeIntCon ,0,GTK_LEAF)
GTNODE(CNS_LNG , GenTreeLngCon ,0,GTK_LEAF)
GTNODE(CNS_DBL , GenTreeDblCon ,0,GTK_LEAF)
GTNODE(CNS_STR , GenTreeStrCon ,0,GTK_LEAF)
//-----------------------------------------------------------------------------
// Unary operators (1 operand):
//-----------------------------------------------------------------------------
GTNODE(NOT , GenTreeOp ,0,GTK_UNOP)
GTNODE(NOP , GenTree ,0,GTK_UNOP|DBK_NOCONTAIN)
GTNODE(NEG , GenTreeOp ,0,GTK_UNOP)
GTNODE(INTRINSIC , GenTreeIntrinsic ,0,GTK_BINOP|GTK_EXOP)
GTNODE(LOCKADD , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR)
GTNODE(XAND , GenTreeOp ,0,GTK_BINOP)
GTNODE(XORR , GenTreeOp ,0,GTK_BINOP)
GTNODE(XADD , GenTreeOp ,0,GTK_BINOP)
GTNODE(XCHG , GenTreeOp ,0,GTK_BINOP)
GTNODE(CMPXCHG , GenTreeCmpXchg ,0,GTK_SPECIAL)
GTNODE(MEMORYBARRIER , GenTree ,0,GTK_LEAF|GTK_NOVALUE)
GTNODE(KEEPALIVE , GenTree ,0,GTK_UNOP|GTK_NOVALUE) // keep operand alive, generate no code, produce no result
GTNODE(CAST , GenTreeCast ,0,GTK_UNOP|GTK_EXOP) // conversion to another type
#if defined(TARGET_ARM)
GTNODE(BITCAST , GenTreeMultiRegOp ,0,GTK_UNOP) // reinterpretation of bits as another type
#else
GTNODE(BITCAST , GenTreeOp ,0,GTK_UNOP) // reinterpretation of bits as another type
#endif
GTNODE(CKFINITE , GenTreeOp ,0,GTK_UNOP|DBK_NOCONTAIN) // Check for NaN
GTNODE(LCLHEAP , GenTreeOp ,0,GTK_UNOP|DBK_NOCONTAIN) // alloca()
GTNODE(ADDR , GenTreeOp ,0,GTK_UNOP|DBK_NOTLIR) // address of
GTNODE(BOUNDS_CHECK , GenTreeBoundsChk ,0,GTK_BINOP|GTK_EXOP|GTK_NOVALUE) // a bounds check - for arrays/spans/SIMDs/HWINTRINSICs
GTNODE(IND , GenTreeIndir ,0,GTK_UNOP) // Load indirection
GTNODE(STOREIND , GenTreeStoreInd ,0,GTK_BINOP|GTK_NOVALUE) // Store indirection
GTNODE(OBJ , GenTreeObj ,0,GTK_UNOP|GTK_EXOP) // Object that MAY have gc pointers, and thus includes the relevant gc layout info.
GTNODE(STORE_OBJ , GenTreeObj ,0,GTK_BINOP|GTK_EXOP|GTK_NOVALUE) // Object that MAY have gc pointers, and thus includes the relevant gc layout info.
GTNODE(BLK , GenTreeBlk ,0,GTK_UNOP|GTK_EXOP) // Block/object with no gc pointers, and with a known size (e.g. a struct with no gc fields)
GTNODE(STORE_BLK , GenTreeBlk ,0,GTK_BINOP|GTK_EXOP|GTK_NOVALUE) // Block/object with no gc pointers, and with a known size (e.g. a struct with no gc fields)
GTNODE(STORE_DYN_BLK , GenTreeStoreDynBlk ,0,GTK_SPECIAL|GTK_NOVALUE) // Dynamically sized block store
GTNODE(NULLCHECK , GenTreeIndir ,0,GTK_UNOP|GTK_NOVALUE) // Null checks the source
GTNODE(ARR_LENGTH , GenTreeArrLen ,0,GTK_UNOP|GTK_EXOP)
GTNODE(FIELD , GenTreeField ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Member-field
GTNODE(ALLOCOBJ , GenTreeAllocObj ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // object allocator
GTNODE(INIT_VAL , GenTreeOp ,0,GTK_UNOP) // Initialization value for an initBlk
GTNODE(BOX , GenTreeBox ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Marks its first operands (a local) as being a box
GTNODE(PUTARG_TYPE , GenTreeOp ,0,GTK_UNOP|DBK_NOTLIR) // Saves argument type between importation and morph
GTNODE(RUNTIMELOOKUP , GenTreeRuntimeLookup, 0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Runtime handle lookup
GTNODE(ARR_ADDR , GenTreeArrAddr ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Wraps an array address expression
GTNODE(BSWAP , GenTreeOp ,0,GTK_UNOP) // Byte swap (32-bit or 64-bit)
GTNODE(BSWAP16 , GenTreeOp ,0,GTK_UNOP) // Byte swap (16-bit)
//-----------------------------------------------------------------------------
// Binary operators (2 operands):
//-----------------------------------------------------------------------------
GTNODE(ADD , GenTreeOp ,1,GTK_BINOP)
GTNODE(SUB , GenTreeOp ,0,GTK_BINOP)
GTNODE(MUL , GenTreeOp ,1,GTK_BINOP)
GTNODE(DIV , GenTreeOp ,0,GTK_BINOP)
GTNODE(MOD , GenTreeOp ,0,GTK_BINOP)
GTNODE(UDIV , GenTreeOp ,0,GTK_BINOP)
GTNODE(UMOD , GenTreeOp ,0,GTK_BINOP)
GTNODE(OR , GenTreeOp ,1,GTK_BINOP)
GTNODE(XOR , GenTreeOp ,1,GTK_BINOP)
GTNODE(AND , GenTreeOp ,1,GTK_BINOP)
GTNODE(LSH , GenTreeOp ,0,GTK_BINOP)
GTNODE(RSH , GenTreeOp ,0,GTK_BINOP)
GTNODE(RSZ , GenTreeOp ,0,GTK_BINOP)
GTNODE(ROL , GenTreeOp ,0,GTK_BINOP)
GTNODE(ROR , GenTreeOp ,0,GTK_BINOP)
GTNODE(ASG , GenTreeOp ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(EQ , GenTreeOp ,0,GTK_BINOP)
GTNODE(NE , GenTreeOp ,0,GTK_BINOP)
GTNODE(LT , GenTreeOp ,0,GTK_BINOP)
GTNODE(LE , GenTreeOp ,0,GTK_BINOP)
GTNODE(GE , GenTreeOp ,0,GTK_BINOP)
GTNODE(GT , GenTreeOp ,0,GTK_BINOP)
// These are similar to GT_EQ/GT_NE but they generate "test" instead of "cmp" instructions.
// Currently these are generated during lowering for code like ((x & y) eq|ne 0) only on
// XArch, but ARM could use these too for the same purpose, as there is a "tst" instruction.
// Note that the general case of comparing a register against 0 is handled directly by
// codegen which emits a "test reg, reg" instruction, that would be more difficult to do
// during lowering because the source operand is used twice so it has to be a lclvar.
// Because of this there is no need to also add GT_TEST_LT/LE/GE/GT opers.
GTNODE(TEST_EQ , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(TEST_NE , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(COMMA , GenTreeOp ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(QMARK , GenTreeQmark ,0,GTK_BINOP|GTK_EXOP|DBK_NOTLIR)
GTNODE(COLON , GenTreeColon ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(INDEX , GenTreeIndex ,0,GTK_BINOP|GTK_EXOP|DBK_NOTLIR) // SZ-array-element.
GTNODE(INDEX_ADDR , GenTreeIndexAddr ,0,GTK_BINOP|GTK_EXOP) // Addr of SZ-array-element; used when aiming to minimize compile times.
GTNODE(MKREFANY , GenTreeOp ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(LEA , GenTreeAddrMode ,0,GTK_BINOP|GTK_EXOP)
#if !defined(TARGET_64BIT)
// A GT_LONG node simply represents the long value produced by the concatenation
// of its two (lower and upper half) operands. Some GT_LONG nodes are transient,
// during the decomposing of longs; others are handled by codegen as operands of
// nodes such as calls, returns and stores of long lclVars.
GTNODE(LONG , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
// The following are nodes representing x86/arm32 specific long operators, including
// high operators of 64-bit operations that require a carry/borrow, which are
// named GT_XXX_HI for consistency, low operators of 64-bit operations that need
// to not be modified in phases post-decompose, and operators that return 64-bit
// results in one instruction.
GTNODE(ADD_LO , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
GTNODE(ADD_HI , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
GTNODE(SUB_LO , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(SUB_HI , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
// The following are nodes that specify shifts that take a GT_LONG op1. The GT_LONG
// contains the hi and lo parts of three operand shift form where one op will be
// shifted into the other op as part of the operation (LSH_HI will shift
// the high bits of the lo operand into the high operand as it shifts left. RSH_LO
// will shift the lo bits of the high operand into the lo operand). LSH_HI
// represents the high operation of a 64-bit left shift by a constant int, and
// RSH_LO represents the lo operation of a 64-bit right shift by a constant int.
GTNODE(LSH_HI , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(RSH_LO , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
#endif // !defined(TARGET_64BIT)
#ifdef FEATURE_SIMD
GTNODE(SIMD , GenTreeSIMD ,0,GTK_SPECIAL) // SIMD functions/operators/intrinsics
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
GTNODE(HWINTRINSIC , GenTreeHWIntrinsic ,0,GTK_SPECIAL) // hardware intrinsics
#endif // FEATURE_HW_INTRINSICS
//-----------------------------------------------------------------------------
// Backend-specific arithmetic nodes:
//-----------------------------------------------------------------------------
// Saturating increment, used in division by a constant (LowerUnsignedDivOrMod).
GTNODE(INC_SATURATE , GenTreeOp ,0,GTK_UNOP|DBK_NOTHIR)
// Returns high bits (top N bits of the 2N bit result of an NxN multiply)
// GT_MULHI is used in division by a constant (LowerUnsignedDivOrMod). We turn
// the div into a MULHI + some adjustments. In codegen, we only use the
// results of the high register, and we drop the low results.
GTNODE(MULHI , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
// A mul that returns the 2N bit result of an NxN multiply. This op is used for
// multiplies that take two ints and return a long result. For 32 bit targets,
// all other multiplies with long results are morphed into helper calls.
// It is similar to GT_MULHI, the difference being that GT_MULHI drops the lo
// part of the result, whereas GT_MUL_LONG keeps both parts of the result.
// MUL_LONG is also used on ARM64, where 64 bit multiplication is more expensive.
#if !defined(TARGET_64BIT)
GTNODE(MUL_LONG , GenTreeMultiRegOp ,1,GTK_BINOP|DBK_NOTHIR)
#elif defined(TARGET_ARM64)
GTNODE(MUL_LONG , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
#endif
// AndNot - emitted on ARM/ARM64 as the BIC instruction. Also used for creating AndNot HWINTRINSIC vector nodes in a cross-ISA manner.
GTNODE(AND_NOT , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
#ifdef TARGET_ARM64
GTNODE(MADD , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Generates the Multiply-Add instruction. In the future, we might consider
// enabling it for both armarch and xarch for floating-point MADD "unsafe" math.
GTNODE(MSUB , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Generates the Multiply-Subtract instruction. In the future, we might consider
// enabling it for both armarch and xarch for floating-point MSUB "unsafe" math.
GTNODE(ADDEX, GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Add with sign/zero extension.
GTNODE(BFIZ , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Bitfield Insert in Zero.
#endif
//-----------------------------------------------------------------------------
// LIR specific compare and conditional branch/set nodes:
//-----------------------------------------------------------------------------
// Sets the condition flags according to the compare result. N.B. Not a relop, it does not produce a value and it cannot be reversed.
GTNODE(CMP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR)
// Makes a comparison and jump if the condition specified. Does not set flags.
GTNODE(JCMP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR)
// Checks the condition flags and branch if the condition specified by GenTreeCC::gtCondition is true.
GTNODE(JCC , GenTreeCC ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR)
// Checks the condition flags and produces 1 if the condition specified by GenTreeCC::gtCondition is true and 0 otherwise.
GTNODE(SETCC , GenTreeCC ,0,GTK_LEAF|DBK_NOTHIR)
#ifdef TARGET_XARCH
// The XARCH BT instruction. Like CMP, this sets the condition flags (CF to be precise) and does not produce a value.
GTNODE(BT , GenTreeOp ,0,(GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR))
#endif
//-----------------------------------------------------------------------------
// Other nodes that look like unary/binary operators:
//-----------------------------------------------------------------------------
GTNODE(JTRUE , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE)
//-----------------------------------------------------------------------------
// Other nodes that have special structure:
//-----------------------------------------------------------------------------
GTNODE(ARR_ELEM , GenTreeArrElem ,0,GTK_SPECIAL) // Multi-dimensional array-element address
GTNODE(ARR_INDEX , GenTreeArrIndex ,0,GTK_BINOP|GTK_EXOP) // Effective, bounds-checked index for one dimension of a multi-dimensional array element
GTNODE(ARR_OFFSET , GenTreeArrOffs ,0,GTK_SPECIAL) // Flattened offset of multi-dimensional array element
GTNODE(CALL , GenTreeCall ,0,GTK_SPECIAL|DBK_NOCONTAIN)
GTNODE(FIELD_LIST , GenTreeFieldList ,0,GTK_SPECIAL) // List of fields of a struct, when passed as an argument
GTNODE(RETURN , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE)
GTNODE(SWITCH , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE)
GTNODE(NO_OP , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // A NOP that cannot be deleted.
GTNODE(START_NONGC , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Starts a new instruction group that will be non-gc interruptible.
GTNODE(START_PREEMPTGC , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Starts a new instruction group where preemptive GC is enabled.
GTNODE(PROF_HOOK , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Profiler Enter/Leave/TailCall hook.
GTNODE(RETFILT , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE) // End filter with TYP_I_IMPL return value.
#if !defined(FEATURE_EH_FUNCLETS)
GTNODE(END_LFIN , GenTreeVal ,0,GTK_LEAF|GTK_NOVALUE) // End locally-invoked finally.
#endif // !FEATURE_EH_FUNCLETS
//-----------------------------------------------------------------------------
// Nodes used by Lower to generate a closer CPU representation of other nodes
//-----------------------------------------------------------------------------
GTNODE(JMPTABLE , GenTree ,0,GTK_LEAF|DBK_NOCONTAIN|DBK_NOTHIR) // Generates the jump table for switches
GTNODE(SWITCH_TABLE , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR) // Jump Table based switch construct
//-----------------------------------------------------------------------------
// Nodes used only within the code generator:
//-----------------------------------------------------------------------------
GTNODE(CLS_VAR_ADDR , GenTreeClsVar ,0,GTK_LEAF|DBK_NOTHIR) // static data member address
GTNODE(PHYSREG , GenTreePhysReg ,0,GTK_LEAF|DBK_NOTHIR) // read from a physical register
GTNODE(EMITNOP , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // emitter-placed nop
GTNODE(PINVOKE_PROLOG , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // pinvoke prolog seq
GTNODE(PINVOKE_EPILOG , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // pinvoke epilog seq
GTNODE(RETURNTRAP , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE|DBK_NOTHIR) // a conditional call to wait on gc
#if defined(TARGET_ARM)
GTNODE(PUTARG_REG , GenTreeMultiRegOp ,0,GTK_UNOP|DBK_NOTHIR) // operator that places outgoing arg in register
#else
GTNODE(PUTARG_REG , GenTreeOp ,0,GTK_UNOP|DBK_NOTHIR) // operator that places outgoing arg in register
#endif
GTNODE(PUTARG_STK , GenTreePutArgStk ,0,GTK_UNOP|GTK_NOVALUE|DBK_NOTHIR) // operator that places outgoing arg in stack
#if FEATURE_ARG_SPLIT
GTNODE(PUTARG_SPLIT , GenTreePutArgSplit ,0,GTK_UNOP|DBK_NOTHIR) // operator that places outgoing arg in registers with stack (split struct in ARM32)
#endif // FEATURE_ARG_SPLIT
GTNODE(SWAP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR) // op1 and op2 swap (registers)
GTNODE(COPY , GenTreeCopyOrReload,0,GTK_UNOP|DBK_NOTHIR) // Copies a variable from its current location to a register that satisfies
GTNODE(RELOAD , GenTreeCopyOrReload,0,GTK_UNOP|DBK_NOTHIR) // code generation constraints. The operand is the actual lclVar node.
GTNODE(IL_OFFSET , GenTreeILOffset ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // marks an IL offset for debugging purposes
/*****************************************************************************/
#undef GTNODE
/*****************************************************************************/
// clang-format on
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// clang-format off
/*****************************************************************************/
#ifndef GTNODE
#error Define GTNODE before including this file.
#endif
/*****************************************************************************/
//
// Node enum
// , GenTree struct flavor
// ,commutative
// ,oper kind | DEBUG oper kind
GTNODE(NONE , char ,0,GTK_SPECIAL)
//-----------------------------------------------------------------------------
// Nodes related to locals:
//-----------------------------------------------------------------------------
GTNODE(PHI , GenTreePhi ,0,GTK_SPECIAL) // phi node for ssa.
GTNODE(PHI_ARG , GenTreePhiArg ,0,GTK_LEAF) // phi(phiarg, phiarg, phiarg)
GTNODE(LCL_VAR , GenTreeLclVar ,0,GTK_LEAF) // local variable
GTNODE(LCL_FLD , GenTreeLclFld ,0,GTK_LEAF) // field in a non-primitive variable
GTNODE(STORE_LCL_VAR , GenTreeLclVar ,0,GTK_UNOP|GTK_NOVALUE) // store to local variable
GTNODE(STORE_LCL_FLD , GenTreeLclFld ,0,GTK_UNOP|GTK_NOVALUE) // store to a part of the variable
GTNODE(LCL_VAR_ADDR , GenTreeLclVar ,0,GTK_LEAF) // address of local variable
GTNODE(LCL_FLD_ADDR , GenTreeLclFld ,0,GTK_LEAF) // address of field in a non-primitive variable
//-----------------------------------------------------------------------------
// Leaf nodes (i.e. these nodes have no sub-operands):
//-----------------------------------------------------------------------------
GTNODE(CATCH_ARG , GenTree ,0,GTK_LEAF) // Exception object in a catch block
GTNODE(LABEL , GenTree ,0,GTK_LEAF) // Jump-target
GTNODE(JMP , GenTreeVal ,0,GTK_LEAF|GTK_NOVALUE) // Jump to another function
GTNODE(FTN_ADDR , GenTreeFptrVal ,0,GTK_LEAF) // Address of a function
GTNODE(RET_EXPR , GenTreeRetExpr ,0,GTK_LEAF|DBK_NOTLIR) // Place holder for the return expression from an inline candidate
GTNODE(CLS_VAR , GenTreeClsVar ,0,GTK_LEAF) // Static data member
GTNODE(ARGPLACE , GenTreeArgPlace ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTLIR) // Placeholder for a "late arg" in the original arg list.
//-----------------------------------------------------------------------------
// Constant nodes:
//-----------------------------------------------------------------------------
GTNODE(CNS_INT , GenTreeIntCon ,0,GTK_LEAF)
GTNODE(CNS_LNG , GenTreeLngCon ,0,GTK_LEAF)
GTNODE(CNS_DBL , GenTreeDblCon ,0,GTK_LEAF)
GTNODE(CNS_STR , GenTreeStrCon ,0,GTK_LEAF)
//-----------------------------------------------------------------------------
// Unary operators (1 operand):
//-----------------------------------------------------------------------------
GTNODE(NOT , GenTreeOp ,0,GTK_UNOP)
GTNODE(NOP , GenTree ,0,GTK_UNOP|DBK_NOCONTAIN)
GTNODE(NEG , GenTreeOp ,0,GTK_UNOP)
GTNODE(INTRINSIC , GenTreeIntrinsic ,0,GTK_BINOP|GTK_EXOP)
GTNODE(LOCKADD , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR)
GTNODE(XAND , GenTreeOp ,0,GTK_BINOP)
GTNODE(XORR , GenTreeOp ,0,GTK_BINOP)
GTNODE(XADD , GenTreeOp ,0,GTK_BINOP)
GTNODE(XCHG , GenTreeOp ,0,GTK_BINOP)
GTNODE(CMPXCHG , GenTreeCmpXchg ,0,GTK_SPECIAL)
GTNODE(MEMORYBARRIER , GenTree ,0,GTK_LEAF|GTK_NOVALUE)
GTNODE(KEEPALIVE , GenTree ,0,GTK_UNOP|GTK_NOVALUE) // keep operand alive, generate no code, produce no result
GTNODE(CAST , GenTreeCast ,0,GTK_UNOP|GTK_EXOP) // conversion to another type
#if defined(TARGET_ARM)
GTNODE(BITCAST , GenTreeMultiRegOp ,0,GTK_UNOP) // reinterpretation of bits as another type
#else
GTNODE(BITCAST , GenTreeOp ,0,GTK_UNOP) // reinterpretation of bits as another type
#endif
GTNODE(CKFINITE , GenTreeOp ,0,GTK_UNOP|DBK_NOCONTAIN) // Check for NaN
GTNODE(LCLHEAP , GenTreeOp ,0,GTK_UNOP|DBK_NOCONTAIN) // alloca()
GTNODE(ADDR , GenTreeOp ,0,GTK_UNOP|DBK_NOTLIR) // address of
GTNODE(BOUNDS_CHECK , GenTreeBoundsChk ,0,GTK_BINOP|GTK_EXOP|GTK_NOVALUE) // a bounds check - for arrays/spans/SIMDs/HWINTRINSICs
GTNODE(IND , GenTreeIndir ,0,GTK_UNOP) // Load indirection
GTNODE(STOREIND , GenTreeStoreInd ,0,GTK_BINOP|GTK_NOVALUE) // Store indirection
GTNODE(OBJ , GenTreeObj ,0,GTK_UNOP|GTK_EXOP) // Object that MAY have gc pointers, and thus includes the relevant gc layout info.
GTNODE(STORE_OBJ , GenTreeObj ,0,GTK_BINOP|GTK_EXOP|GTK_NOVALUE) // Object that MAY have gc pointers, and thus includes the relevant gc layout info.
GTNODE(BLK , GenTreeBlk ,0,GTK_UNOP|GTK_EXOP) // Block/object with no gc pointers, and with a known size (e.g. a struct with no gc fields)
GTNODE(STORE_BLK , GenTreeBlk ,0,GTK_BINOP|GTK_EXOP|GTK_NOVALUE) // Block/object with no gc pointers, and with a known size (e.g. a struct with no gc fields)
GTNODE(STORE_DYN_BLK , GenTreeStoreDynBlk ,0,GTK_SPECIAL|GTK_NOVALUE) // Dynamically sized block store
GTNODE(NULLCHECK , GenTreeIndir ,0,GTK_UNOP|GTK_NOVALUE) // Null checks the source
GTNODE(ARR_LENGTH , GenTreeArrLen ,0,GTK_UNOP|GTK_EXOP)
GTNODE(FIELD , GenTreeField ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Member-field
GTNODE(ALLOCOBJ , GenTreeAllocObj ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // object allocator
GTNODE(INIT_VAL , GenTreeOp ,0,GTK_UNOP) // Initialization value for an initBlk
GTNODE(BOX , GenTreeBox ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Marks its first operands (a local) as being a box
GTNODE(PUTARG_TYPE , GenTreeOp ,0,GTK_UNOP|DBK_NOTLIR) // Saves argument type between importation and morph
GTNODE(RUNTIMELOOKUP , GenTreeRuntimeLookup, 0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Runtime handle lookup
GTNODE(ARR_ADDR , GenTreeArrAddr ,0,GTK_UNOP|GTK_EXOP|DBK_NOTLIR) // Wraps an array address expression
GTNODE(BSWAP , GenTreeOp ,0,GTK_UNOP) // Byte swap (32-bit or 64-bit)
GTNODE(BSWAP16 , GenTreeOp ,0,GTK_UNOP) // Byte swap (16-bit)
//-----------------------------------------------------------------------------
// Binary operators (2 operands):
//-----------------------------------------------------------------------------
GTNODE(ADD , GenTreeOp ,1,GTK_BINOP)
GTNODE(SUB , GenTreeOp ,0,GTK_BINOP)
GTNODE(MUL , GenTreeOp ,1,GTK_BINOP)
GTNODE(DIV , GenTreeOp ,0,GTK_BINOP)
GTNODE(MOD , GenTreeOp ,0,GTK_BINOP)
GTNODE(UDIV , GenTreeOp ,0,GTK_BINOP)
GTNODE(UMOD , GenTreeOp ,0,GTK_BINOP)
GTNODE(OR , GenTreeOp ,1,GTK_BINOP)
GTNODE(XOR , GenTreeOp ,1,GTK_BINOP)
GTNODE(AND , GenTreeOp ,1,GTK_BINOP)
GTNODE(LSH , GenTreeOp ,0,GTK_BINOP)
GTNODE(RSH , GenTreeOp ,0,GTK_BINOP)
GTNODE(RSZ , GenTreeOp ,0,GTK_BINOP)
GTNODE(ROL , GenTreeOp ,0,GTK_BINOP)
GTNODE(ROR , GenTreeOp ,0,GTK_BINOP)
GTNODE(ASG , GenTreeOp ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(EQ , GenTreeOp ,0,GTK_BINOP)
GTNODE(NE , GenTreeOp ,0,GTK_BINOP)
GTNODE(LT , GenTreeOp ,0,GTK_BINOP)
GTNODE(LE , GenTreeOp ,0,GTK_BINOP)
GTNODE(GE , GenTreeOp ,0,GTK_BINOP)
GTNODE(GT , GenTreeOp ,0,GTK_BINOP)
// These are similar to GT_EQ/GT_NE but they generate "test" instead of "cmp" instructions.
// Currently these are generated during lowering for code like ((x & y) eq|ne 0) only on
// XArch, but ARM could use these too for the same purpose, as there is a "tst" instruction.
// Note that the general case of comparing a register against 0 is handled directly by
// codegen which emits a "test reg, reg" instruction, that would be more difficult to do
// during lowering because the source operand is used twice so it has to be a lclvar.
// Because of this there is no need to also add GT_TEST_LT/LE/GE/GT opers.
GTNODE(TEST_EQ , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(TEST_NE , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(COMMA , GenTreeOp ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(QMARK , GenTreeQmark ,0,GTK_BINOP|GTK_EXOP|DBK_NOTLIR)
GTNODE(COLON , GenTreeColon ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(INDEX , GenTreeIndex ,0,GTK_BINOP|GTK_EXOP|DBK_NOTLIR) // SZ-array-element.
GTNODE(INDEX_ADDR , GenTreeIndexAddr ,0,GTK_BINOP|GTK_EXOP) // Addr of SZ-array-element; used when aiming to minimize compile times.
GTNODE(MKREFANY , GenTreeOp ,0,GTK_BINOP|DBK_NOTLIR)
GTNODE(LEA , GenTreeAddrMode ,0,GTK_BINOP|GTK_EXOP)
#if !defined(TARGET_64BIT)
// A GT_LONG node simply represents the long value produced by the concatenation
// of its two (lower and upper half) operands. Some GT_LONG nodes are transient,
// during the decomposing of longs; others are handled by codegen as operands of
// nodes such as calls, returns and stores of long lclVars.
GTNODE(LONG , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
// The following are nodes representing x86/arm32 specific long operators, including
// high operators of 64-bit operations that require a carry/borrow, which are
// named GT_XXX_HI for consistency, low operators of 64-bit operations that need
// to not be modified in phases post-decompose, and operators that return 64-bit
// results in one instruction.
GTNODE(ADD_LO , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
GTNODE(ADD_HI , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
GTNODE(SUB_LO , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(SUB_HI , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
// The following are nodes that specify shifts that take a GT_LONG op1. The GT_LONG
// contains the hi and lo parts of three operand shift form where one op will be
// shifted into the other op as part of the operation (LSH_HI will shift
// the high bits of the lo operand into the high operand as it shifts left. RSH_LO
// will shift the lo bits of the high operand into the lo operand). LSH_HI
// represents the high operation of a 64-bit left shift by a constant int, and
// RSH_LO represents the lo operation of a 64-bit right shift by a constant int.
GTNODE(LSH_HI , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
GTNODE(RSH_LO , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
#endif // !defined(TARGET_64BIT)
#ifdef FEATURE_SIMD
GTNODE(SIMD , GenTreeSIMD ,0,GTK_SPECIAL) // SIMD functions/operators/intrinsics
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
GTNODE(HWINTRINSIC , GenTreeHWIntrinsic ,0,GTK_SPECIAL) // hardware intrinsics
#endif // FEATURE_HW_INTRINSICS
//-----------------------------------------------------------------------------
// Backend-specific arithmetic nodes:
//-----------------------------------------------------------------------------
// Saturating increment, used in division by a constant (LowerUnsignedDivOrMod).
GTNODE(INC_SATURATE , GenTreeOp ,0,GTK_UNOP|DBK_NOTHIR)
// Returns high bits (top N bits of the 2N bit result of an NxN multiply)
// GT_MULHI is used in division by a constant (LowerUnsignedDivOrMod). We turn
// the div into a MULHI + some adjustments. In codegen, we only use the
// results of the high register, and we drop the low results.
GTNODE(MULHI , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
// A mul that returns the 2N bit result of an NxN multiply. This op is used for
// multiplies that take two ints and return a long result. For 32 bit targets,
// all other multiplies with long results are morphed into helper calls.
// It is similar to GT_MULHI, the difference being that GT_MULHI drops the lo
// part of the result, whereas GT_MUL_LONG keeps both parts of the result.
// MUL_LONG is also used on ARM64, where 64 bit multiplication is more expensive.
#if !defined(TARGET_64BIT)
GTNODE(MUL_LONG , GenTreeMultiRegOp ,1,GTK_BINOP|DBK_NOTHIR)
#elif defined(TARGET_ARM64)
GTNODE(MUL_LONG , GenTreeOp ,1,GTK_BINOP|DBK_NOTHIR)
#endif
// AndNot - emitted on ARM/ARM64 as the BIC instruction. Also used for creating AndNot HWINTRINSIC vector nodes in a cross-ISA manner.
GTNODE(AND_NOT , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR)
#ifdef TARGET_ARM64
GTNODE(MADD , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Generates the Multiply-Add instruction. In the future, we might consider
// enabling it for both armarch and xarch for floating-point MADD "unsafe" math.
GTNODE(MSUB , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Generates the Multiply-Subtract instruction. In the future, we might consider
// enabling it for both armarch and xarch for floating-point MSUB "unsafe" math.
GTNODE(ADDEX, GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Add with sign/zero extension.
GTNODE(BFIZ , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Bitfield Insert in Zero.
GTNODE(CSNEG_MI , GenTreeOp ,0,GTK_BINOP|DBK_NOTHIR) // Conditional select, negate, minus result
#endif
//-----------------------------------------------------------------------------
// LIR specific compare and conditional branch/set nodes:
//-----------------------------------------------------------------------------
// Sets the condition flags according to the compare result. N.B. Not a relop, it does not produce a value and it cannot be reversed.
GTNODE(CMP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR)
// Makes a comparison and jump if the condition specified. Does not set flags.
GTNODE(JCMP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR)
// Checks the condition flags and branch if the condition specified by GenTreeCC::gtCondition is true.
GTNODE(JCC , GenTreeCC ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR)
// Checks the condition flags and produces 1 if the condition specified by GenTreeCC::gtCondition is true and 0 otherwise.
GTNODE(SETCC , GenTreeCC ,0,GTK_LEAF|DBK_NOTHIR)
#ifdef TARGET_XARCH
// The XARCH BT instruction. Like CMP, this sets the condition flags (CF to be precise) and does not produce a value.
GTNODE(BT , GenTreeOp ,0,(GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR))
#endif
//-----------------------------------------------------------------------------
// Other nodes that look like unary/binary operators:
//-----------------------------------------------------------------------------
GTNODE(JTRUE , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE)
//-----------------------------------------------------------------------------
// Other nodes that have special structure:
//-----------------------------------------------------------------------------
GTNODE(ARR_ELEM , GenTreeArrElem ,0,GTK_SPECIAL) // Multi-dimensional array-element address
GTNODE(ARR_INDEX , GenTreeArrIndex ,0,GTK_BINOP|GTK_EXOP) // Effective, bounds-checked index for one dimension of a multi-dimensional array element
GTNODE(ARR_OFFSET , GenTreeArrOffs ,0,GTK_SPECIAL) // Flattened offset of multi-dimensional array element
GTNODE(CALL , GenTreeCall ,0,GTK_SPECIAL|DBK_NOCONTAIN)
GTNODE(FIELD_LIST , GenTreeFieldList ,0,GTK_SPECIAL) // List of fields of a struct, when passed as an argument
GTNODE(RETURN , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE)
GTNODE(SWITCH , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE)
GTNODE(NO_OP , GenTree ,0,GTK_LEAF|GTK_NOVALUE) // A NOP that cannot be deleted.
GTNODE(START_NONGC , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Starts a new instruction group that will be non-gc interruptible.
GTNODE(START_PREEMPTGC , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Starts a new instruction group where preemptive GC is enabled.
GTNODE(PROF_HOOK , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // Profiler Enter/Leave/TailCall hook.
GTNODE(RETFILT , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE) // End filter with TYP_I_IMPL return value.
#if !defined(FEATURE_EH_FUNCLETS)
GTNODE(END_LFIN , GenTreeVal ,0,GTK_LEAF|GTK_NOVALUE) // End locally-invoked finally.
#endif // !FEATURE_EH_FUNCLETS
//-----------------------------------------------------------------------------
// Nodes used by Lower to generate a closer CPU representation of other nodes
//-----------------------------------------------------------------------------
GTNODE(JMPTABLE , GenTree ,0,GTK_LEAF|DBK_NOCONTAIN|DBK_NOTHIR) // Generates the jump table for switches
GTNODE(SWITCH_TABLE , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR) // Jump Table based switch construct
//-----------------------------------------------------------------------------
// Nodes used only within the code generator:
//-----------------------------------------------------------------------------
GTNODE(CLS_VAR_ADDR , GenTreeClsVar ,0,GTK_LEAF|DBK_NOTHIR) // static data member address
GTNODE(PHYSREG , GenTreePhysReg ,0,GTK_LEAF|DBK_NOTHIR) // read from a physical register
GTNODE(EMITNOP , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // emitter-placed nop
GTNODE(PINVOKE_PROLOG , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // pinvoke prolog seq
GTNODE(PINVOKE_EPILOG , GenTree ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // pinvoke epilog seq
GTNODE(RETURNTRAP , GenTreeOp ,0,GTK_UNOP|GTK_NOVALUE|DBK_NOTHIR) // a conditional call to wait on gc
#if defined(TARGET_ARM)
GTNODE(PUTARG_REG , GenTreeMultiRegOp ,0,GTK_UNOP|DBK_NOTHIR) // operator that places outgoing arg in register
#else
GTNODE(PUTARG_REG , GenTreeOp ,0,GTK_UNOP|DBK_NOTHIR) // operator that places outgoing arg in register
#endif
GTNODE(PUTARG_STK , GenTreePutArgStk ,0,GTK_UNOP|GTK_NOVALUE|DBK_NOTHIR) // operator that places outgoing arg in stack
#if FEATURE_ARG_SPLIT
GTNODE(PUTARG_SPLIT , GenTreePutArgSplit ,0,GTK_UNOP|DBK_NOTHIR) // operator that places outgoing arg in registers with stack (split struct in ARM32)
#endif // FEATURE_ARG_SPLIT
GTNODE(SWAP , GenTreeOp ,0,GTK_BINOP|GTK_NOVALUE|DBK_NOTHIR) // op1 and op2 swap (registers)
GTNODE(COPY , GenTreeCopyOrReload,0,GTK_UNOP|DBK_NOTHIR) // Copies a variable from its current location to a register that satisfies
GTNODE(RELOAD , GenTreeCopyOrReload,0,GTK_UNOP|DBK_NOTHIR) // code generation constraints. The operand is the actual lclVar node.
GTNODE(IL_OFFSET , GenTreeILOffset ,0,GTK_LEAF|GTK_NOVALUE|DBK_NOTHIR) // marks an IL offset for debugging purposes
/*****************************************************************************/
#undef GTNODE
/*****************************************************************************/
// clang-format on
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
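
For contrast, a small hypothetical C++ sketch (not from the PR): for *unsigned* operands, remainder by a power of two is already a single AND, which is why only the signed form needs the extra `negs`/`csneg` fix-up to preserve the sign of the result:

```cpp
#include <cassert>

static unsigned UMod16(unsigned a)
{
    return a & 15u; // exact for every unsigned a
}

int main()
{
    for (unsigned a = 0; a < 100000u; ++a)
    {
        assert(UMod16(a) == a % 16u);
    }
    return 0;
}
```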
|
./src/coreclr/jit/lower.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Lower XX
XX XX
XX Preconditions: XX
XX XX
XX Postconditions (for the nodes currently handled): XX
XX - All operands requiring a register are explicit in the graph XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "lower.h"
#if !defined(TARGET_64BIT)
#include "decomposelongs.h"
#endif // !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// MakeSrcContained: Make "childNode" a contained node
//
// Arguments:
// parentNode - is a non-leaf node that can contain its 'childNode'
// childNode - is an op that will now be contained by its parent.
//
// Notes:
// If 'childNode' has any existing sources, they will now be sources for the parent.
//
void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode) const
{
assert(!parentNode->OperIsLeaf());
assert(childNode->canBeContained());
childNode->SetContained();
assert(childNode->isContained());
#ifdef DEBUG
if (IsContainableMemoryOp(childNode))
{
// Verify caller of this method checked safety.
//
const bool isSafeToContainMem = IsSafeToContainMem(parentNode, childNode);
if (!isSafeToContainMem)
{
JITDUMP("** Unsafe mem containment of [%06u] in [%06u}, comp->dspTreeID(childNode), "
"comp->dspTreeID(parentNode)\n");
assert(isSafeToContainMem);
}
}
#endif
}
//------------------------------------------------------------------------
// CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate
// and, if so, makes it contained.
//
// Arguments:
// parentNode - is any non-leaf node
// childNode - is a child op of 'parentNode'
//
// Return value:
// true if we are able to make childNode a contained immediate
//
bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
{
assert(!parentNode->OperIsLeaf());
// If childNode is a containable immediate
if (IsContainableImmed(parentNode, childNode))
{
// then make it contained within the parentNode
MakeSrcContained(parentNode, childNode);
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and parentNode,
// and returns 'true' iff memory operand childNode can be contained in parentNode.
//
// Arguments:
// parentNode - any non-leaf node
// childNode - some node that is an input to `parentNode`
//
// Return value:
// true if it is safe to make childNode a contained memory operand.
//
bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const
{
// Quick early-out for unary cases
//
if (childNode->gtNext == parentNode)
{
return true;
}
m_scratchSideEffects.Clear();
m_scratchSideEffects.AddNode(comp, childNode);
for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext)
{
const bool strict = true;
if (m_scratchSideEffects.InterferesWith(comp, node, strict))
{
return false;
}
}
return true;
}
//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and grandParentNode
// and returns 'true' iff memory operand childNode can be contained in grandParentNode.
//
// Arguments:
// grandParentNode - any non-leaf node
// parentNode - parent of `childNode` and an input to `grandParentNode`
// childNode - some node that is an input to `parentNode`
//
// Return value:
// true if it is safe to make childNode a contained memory operand.
//
bool Lowering::IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const
{
m_scratchSideEffects.Clear();
m_scratchSideEffects.AddNode(comp, childNode);
for (GenTree* node = childNode->gtNext; node != grandparentNode; node = node->gtNext)
{
if (node == parentNode)
{
continue;
}
const bool strict = true;
if (m_scratchSideEffects.InterferesWith(comp, node, strict))
{
return false;
}
}
return true;
}
//------------------------------------------------------------------------
// LowerNode: this is the main entry point for Lowering.
//
// Arguments:
// node - the node we are lowering.
//
// Returns:
// next node in the transformed node sequence that needs to be lowered.
//
GenTree* Lowering::LowerNode(GenTree* node)
{
assert(node != nullptr);
switch (node->gtOper)
{
case GT_NULLCHECK:
case GT_IND:
LowerIndir(node->AsIndir());
break;
case GT_STOREIND:
LowerStoreIndirCommon(node->AsStoreInd());
break;
case GT_ADD:
{
GenTree* next = LowerAdd(node->AsOp());
if (next != nullptr)
{
return next;
}
}
break;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif
case GT_SUB:
case GT_AND:
case GT_OR:
case GT_XOR:
return LowerBinaryArithmetic(node->AsOp());
case GT_MUL:
case GT_MULHI:
#if defined(TARGET_X86) || defined(TARGET_ARM64)
case GT_MUL_LONG:
#endif
return LowerMul(node->AsOp());
case GT_UDIV:
case GT_UMOD:
if (!LowerUnsignedDivOrMod(node->AsOp()))
{
ContainCheckDivOrMod(node->AsOp());
}
break;
case GT_DIV:
case GT_MOD:
return LowerSignedDivOrMod(node);
case GT_SWITCH:
return LowerSwitch(node);
case GT_CALL:
LowerCall(node);
break;
case GT_LT:
case GT_LE:
case GT_GT:
case GT_GE:
case GT_EQ:
case GT_NE:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
return LowerCompare(node);
case GT_JTRUE:
return LowerJTrue(node->AsOp());
case GT_JMP:
LowerJmpMethod(node);
break;
case GT_RETURN:
LowerRet(node->AsUnOp());
break;
case GT_RETURNTRAP:
ContainCheckReturnTrap(node->AsOp());
break;
case GT_CAST:
LowerCast(node);
break;
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_BOUNDS_CHECK:
ContainCheckBoundsChk(node->AsBoundsChk());
break;
#endif // TARGET_XARCH
case GT_ARR_ELEM:
return LowerArrElem(node);
case GT_ARR_OFFSET:
ContainCheckArrOffset(node->AsArrOffs());
break;
case GT_ROL:
case GT_ROR:
LowerRotate(node);
break;
#ifndef TARGET_64BIT
case GT_LSH_HI:
case GT_RSH_LO:
ContainCheckShiftRotate(node->AsOp());
break;
#endif // !TARGET_64BIT
case GT_LSH:
case GT_RSH:
case GT_RSZ:
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
LowerShift(node->AsOp());
#else
ContainCheckShiftRotate(node->AsOp());
#endif
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
if (node->AsBlk()->Data()->IsCall())
{
LowerStoreSingleRegCallStruct(node->AsBlk());
break;
}
FALLTHROUGH;
case GT_STORE_DYN_BLK:
LowerBlockStoreCommon(node->AsBlk());
break;
case GT_LCLHEAP:
ContainCheckLclHeap(node->AsOp());
break;
#ifdef TARGET_XARCH
case GT_INTRINSIC:
ContainCheckIntrinsic(node->AsOp());
break;
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
case GT_SIMD:
LowerSIMD(node->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
LowerHWIntrinsic(node->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_LCL_FLD:
{
// We should only encounter this for lclVars that are lvDoNotEnregister.
verifyLclFldDoNotEnregister(node->AsLclVarCommon()->GetLclNum());
break;
}
case GT_LCL_VAR:
{
GenTreeLclVar* lclNode = node->AsLclVar();
WidenSIMD12IfNecessary(lclNode);
LclVarDsc* varDsc = comp->lvaGetDesc(lclNode);
// The consumer of this node must check compatibility of the fields.
// This merely checks whether it is possible for this to be a multireg node.
if (lclNode->IsMultiRegLclVar())
{
if (!varDsc->lvPromoted ||
(comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT) ||
(varDsc->lvFieldCnt > MAX_MULTIREG_COUNT))
{
lclNode->ClearMultiReg();
if (lclNode->TypeIs(TYP_STRUCT))
{
comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp));
}
}
}
break;
}
case GT_STORE_LCL_VAR:
WidenSIMD12IfNecessary(node->AsLclVarCommon());
FALLTHROUGH;
case GT_STORE_LCL_FLD:
LowerStoreLocCommon(node->AsLclVarCommon());
break;
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_CMPXCHG:
CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand);
break;
case GT_XORR:
case GT_XAND:
case GT_XADD:
CheckImmedAndMakeContained(node, node->AsOp()->gtOp2);
break;
#elif defined(TARGET_XARCH)
case GT_XORR:
case GT_XAND:
case GT_XADD:
if (node->IsUnusedValue())
{
node->ClearUnusedValue();
// Make sure the types are identical, since the node type is changed to VOID
// CodeGen relies on op2's type to determine the instruction size.
// Note that the node type cannot be a small int but the data operand can.
assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet());
node->SetOper(GT_LOCKADD);
node->gtType = TYP_VOID;
CheckImmedAndMakeContained(node, node->gtGetOp2());
}
break;
#endif
#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64)
// TODO-ARMARCH-CQ: We should contain this as long as the offset fits.
case GT_OBJ:
if (node->AsObj()->Addr()->OperIsLocalAddr())
{
node->AsObj()->Addr()->SetContained();
}
break;
#endif // !TARGET_ARMARCH
case GT_KEEPALIVE:
node->gtGetOp1()->SetRegOptional();
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
{
const GenTreeLclVarCommon* lclAddr = node->AsLclVarCommon();
const LclVarDsc* varDsc = comp->lvaGetDesc(lclAddr);
if (!varDsc->lvDoNotEnregister)
{
// TODO-Cleanup: this is definitely not the best place for this detection,
// but for now it is the easiest. Move it to morph.
comp->lvaSetVarDoNotEnregister(lclAddr->GetLclNum() DEBUGARG(DoNotEnregisterReason::LclAddrNode));
}
}
break;
default:
break;
}
return node->gtNext;
}
/** -- Switch Lowering --
* The main idea of switch lowering is to keep transparency of the register requirements of this node
* downstream in LSRA. Given that the switch instruction is inherently a control statement which in the JIT
* is represented as a simple tree node, at the time we actually generate code for it we end up
* generating instructions that actually modify the flow of execution that imposes complicated
* register requirement and lifetimes.
*
* So, for the purpose of LSRA, we want to have a more detailed specification of what a switch node actually
* means and more importantly, which and when do we need a register for each instruction we want to issue
* to correctly allocate them downstream.
*
* For this purpose, this procedure performs switch lowering in two different ways:
*
* a) Represent the switch statement as a zero-index jump table construct. This means that for every destination
* of the switch, we will store this destination in an array of addresses and the code generator will issue
* a data section where this array will live and will emit code that based on the switch index, will indirect and
* jump to the destination specified in the jump table.
*
* For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch
* node for jump table based switches.
* The overall structure of a GT_SWITCH_TABLE is:
*
* GT_SWITCH_TABLE
* |_________ localVar (a temporary local that holds the switch index)
* |_________ jumpTable (this is a special node that holds the address of the jump table array)
*
* Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following:
*
* Input: GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH)
* |_____ expr (an arbitrarily complex GT_NODE that represents the switch index)
*
* This gets transformed into the following statements inside a BBJ_COND basic block (the target would be
* the default case of the switch in case the conditional is evaluated to true).
*
* ----- original block, transformed
* GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index)
* |_____ expr (the index expression)
*
* GT_JTRUE
* |_____ GT_COND
* |_____ GT_GE
* |___ Int_Constant (This constant is the index of the default case
* that happens to be the highest index in the jump table).
* |___ tempLocal (The local variable were we stored the index expression).
*
* ----- new basic block
* GT_SWITCH_TABLE
* |_____ tempLocal
* |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly
* and LinearCodeGen will be responsible to generate downstream).
*
* This way there are no implicit temporaries.
*
* b) For small-sized switches, we will actually morph them into a series of conditionals of the form
* if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case }
* (For the default case conditional, we'll be constructing the exact same code as the jump table case one).
* else if (case == firstCase){ goto jumpTable[1]; }
* else if (case == secondCase) { goto jumptable[2]; } and so on.
*
* This transformation is of course made in JIT-IR, not downstream to CodeGen level, so this way we no longer
* require internal temporaries to maintain the index we're evaluating plus we're using existing code from
* LinearCodeGen to implement this instead of implement all the control flow constructs using InstrDscs and
* InstrGroups downstream.
*/
GenTree* Lowering::LowerSwitch(GenTree* node)
{
unsigned jumpCnt;
unsigned targetCnt;
BasicBlock** jumpTab;
assert(node->gtOper == GT_SWITCH);
// The first step is to build the default case conditional construct that is
// shared between both kinds of expansion of the switch node.
// To avoid confusion, we'll alias m_block to originalSwitchBB
// that represents the node we're morphing.
BasicBlock* originalSwitchBB = m_block;
LIR::Range& switchBBRange = LIR::AsRange(originalSwitchBB);
// jumpCnt is the number of elements in the jump table array.
// jumpTab is the actual pointer to the jump table array.
// targetCnt is the number of unique targets in the jump table array.
jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount;
jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab;
targetCnt = originalSwitchBB->NumSucc(comp);
// GT_SWITCH must be a top-level node with no use.
#ifdef DEBUG
{
LIR::Use use;
assert(!switchBBRange.TryGetUse(node, &use));
}
#endif
JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt);
// Handle a degenerate case: if the switch has only a default case, just convert it
// to an unconditional branch. This should only happen in minopts or with debuggable
// code.
if (targetCnt == 1)
{
JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
noway_assert(comp->opts.OptimizationDisabled());
if (originalSwitchBB->bbNext == jumpTab[0])
{
originalSwitchBB->bbJumpKind = BBJ_NONE;
originalSwitchBB->bbJumpDest = nullptr;
}
else
{
originalSwitchBB->bbJumpKind = BBJ_ALWAYS;
originalSwitchBB->bbJumpDest = jumpTab[0];
}
// Remove extra predecessor links if there was more than one case.
for (unsigned i = 1; i < jumpCnt; ++i)
{
(void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB);
}
// We have to get rid of the GT_SWITCH node but a child might have side effects so just assign
// the result of the child subtree to a temp.
GenTree* rhs = node->AsOp()->gtOp1;
unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable"));
comp->lvaTable[lclNum].lvType = rhs->TypeGet();
GenTreeLclVar* store = comp->gtNewStoreLclVar(lclNum, rhs);
switchBBRange.InsertAfter(node, store);
switchBBRange.Remove(node);
return store;
}
noway_assert(jumpCnt >= 2);
// Spill the argument to the switch node into a local so that it can be used later.
LIR::Use use(switchBBRange, &(node->AsOp()->gtOp1), node);
ReplaceWithLclVar(use);
// GT_SWITCH(indexExpression) is now two statements:
// 1. a statement containing 'asg' (for temp = indexExpression)
// 2. and a statement with GT_SWITCH(temp)
assert(node->gtOper == GT_SWITCH);
GenTree* temp = node->AsOp()->gtOp1;
assert(temp->gtOper == GT_LCL_VAR);
unsigned tempLclNum = temp->AsLclVarCommon()->GetLclNum();
var_types tempLclType = temp->TypeGet();
BasicBlock* defaultBB = jumpTab[jumpCnt - 1];
BasicBlock* followingBB = originalSwitchBB->bbNext;
/* Is the number of cases right for a test and jump switch? */
const bool fFirstCaseFollows = (followingBB == jumpTab[0]);
const bool fDefaultFollows = (followingBB == defaultBB);
unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc
// This means really just a single cmp/jcc (aka a simple if/else)
if (fFirstCaseFollows || fDefaultFollows)
{
minSwitchTabJumpCnt++;
}
#if defined(TARGET_ARM)
// On ARM for small switch tables we will
// generate a sequence of compare and branch instructions
// because the code to load the base of the switch
// table is huge and hideous due to the relocation... :(
minSwitchTabJumpCnt += 2;
#endif // TARGET_ARM
// Once we have the temporary variable, we construct the conditional branch for
// the default case. As stated above, this conditional is being shared between
// both GT_SWITCH lowering code paths.
// This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType)));
// Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
// is now less than 0 (that would also hit the default case).
gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;
GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
gtDefaultCaseJump->gtFlags = node->gtFlags;
LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump);
switchBBRange.InsertAtEnd(std::move(condRange));
BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode());
// afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor.
// originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
// representing the fall-through flow from originalSwitchBB.
assert(originalSwitchBB->bbJumpKind == BBJ_NONE);
assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH);
assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
// The GT_SWITCH code is still in originalSwitchBB (it will be removed later).
// Turn originalSwitchBB into a BBJ_COND.
originalSwitchBB->bbJumpKind = BBJ_COND;
originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];
// Fix the pred for the default case: the default block target still has originalSwitchBB
// as a predecessor, but the fgSplitBlockAfterStatement() moved all predecessors to point
// to afterDefaultCondBlock.
flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock);
comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge);
bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;
if (TargetOS::IsUnix && TargetArchitecture::IsArm32)
{
// Force using an inlined jump sequence instead of switch table generation.
// The switch jump table is generated with incorrect values in the CoreRT case,
// so any large switch will crash after loading any such value to PC.
// I think this is due to the fact that we use absolute addressing
// instead of relative. But CoreRT, as a rule, uses relative
// addressing when we generate an executable.
// See also https://github.com/dotnet/runtime/issues/8683
// Also https://github.com/dotnet/coreclr/pull/13197
useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI);
}
// If we originally had 2 unique successors, check to see whether there is a unique
// non-default case, in which case we can eliminate the switch altogether.
// Note that the single unique successor case is handled above.
BasicBlock* uniqueSucc = nullptr;
if (targetCnt == 2)
{
uniqueSucc = jumpTab[0];
noway_assert(jumpCnt >= 2);
for (unsigned i = 1; i < jumpCnt - 1; i++)
{
if (jumpTab[i] != uniqueSucc)
{
uniqueSucc = nullptr;
break;
}
}
}
if (uniqueSucc != nullptr)
{
// If the unique successor immediately follows this block, we have nothing to do -
// it will simply fall-through after we remove the switch, below.
// Otherwise, make this a BBJ_ALWAYS.
// Now, fixup the predecessor links to uniqueSucc. In the original jumpTab:
// jumpTab[i-1] was the default target, which we handled above,
// jumpTab[0] is the first target, and we'll leave that predecessor link.
// Remove any additional predecessor links to uniqueSucc.
for (unsigned i = 1; i < jumpCnt - 1; ++i)
{
assert(jumpTab[i] == uniqueSucc);
(void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
}
if (afterDefaultCondBlock->bbNext == uniqueSucc)
{
afterDefaultCondBlock->bbJumpKind = BBJ_NONE;
afterDefaultCondBlock->bbJumpDest = nullptr;
}
else
{
afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS;
afterDefaultCondBlock->bbJumpDest = uniqueSucc;
}
}
// If the number of possible destinations is small enough, we proceed to expand the switch
// into a series of conditional branches, otherwise we follow the jump table based switch
// transformation.
else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50))
{
// Lower the switch into a series of compare and branch IR trees.
//
// In this case we will morph the node in the following way:
// 1. Generate a JTRUE statement to evaluate the default case. (This happens above.)
// 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain
// a statement that is responsible for performing a comparison of the table index and conditional
// branch if equal.
JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum);
// We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new
// blocks. If we end up not needing it at all (say, if all the non-default cases just fall through),
// we'll delete it.
bool fUsedAfterDefaultCondBlock = false;
BasicBlock* currentBlock = afterDefaultCondBlock;
LIR::Range* currentBBRange = &LIR::AsRange(currentBlock);
// Walk to entries 0 to jumpCnt - 1. If a case target follows, ignore it and let it fall through.
// If no case target follows, the last one doesn't need to be a compare/branch: it can be an
// unconditional branch.
bool fAnyTargetFollows = false;
for (unsigned i = 0; i < jumpCnt - 1; ++i)
{
assert(currentBlock != nullptr);
// Remove the switch from the predecessor list of this case target's block.
// We'll add the proper new predecessor edge later.
flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock);
if (jumpTab[i] == followingBB)
{
// This case label follows the switch; let it fall through.
fAnyTargetFollows = true;
continue;
}
// We need a block to put in the new compare and/or branch.
// If we haven't used the afterDefaultCondBlock yet, then use that.
if (fUsedAfterDefaultCondBlock)
{
BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true);
comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
currentBlock = newBlock;
currentBBRange = &LIR::AsRange(currentBlock);
}
else
{
assert(currentBlock == afterDefaultCondBlock);
fUsedAfterDefaultCondBlock = true;
}
// We're going to have a branch, either a conditional or unconditional,
// to the target. Set the target.
currentBlock->bbJumpDest = jumpTab[i];
// Wire up the predecessor list for the "branch" case.
comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge);
if (!fAnyTargetFollows && (i == jumpCnt - 2))
{
// We're processing the last one, and there is no fall through from any case
// to the following block, so we can use an unconditional branch to the final
// case: there is no need to compare against the case index, since it's
// guaranteed to be taken (since the default case was handled first, above).
currentBlock->bbJumpKind = BBJ_ALWAYS;
}
else
{
// Otherwise, it's a conditional branch. Set the branch kind, then add the
// condition statement.
currentBlock->bbJumpKind = BBJ_COND;
// Now, build the conditional statement for the current case that is
// being evaluated:
// GT_JTRUE
// |__ GT_COND
// |____GT_EQ
// |____ (switchIndex) (The temp variable)
// |____ (ICon) (The actual case constant)
GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
comp->gtNewIconNode(i, tempLclType));
GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond);
LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch);
currentBBRange->InsertAtEnd(std::move(caseRange));
}
}
if (fAnyTargetFollows)
{
// There is a fall-through to the following block. In the loop
// above, we deleted all the predecessor edges from the switch.
// In this case, we need to add one back.
comp->fgAddRefPred(currentBlock->bbNext, currentBlock);
}
if (!fUsedAfterDefaultCondBlock)
{
// All the cases were fall-through! We don't need this block.
// Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag
// so fgRemoveBlock() doesn't complain.
JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum);
assert(currentBlock == afterDefaultCondBlock);
assert(currentBlock->bbJumpKind == BBJ_SWITCH);
currentBlock->bbJumpKind = BBJ_NONE;
currentBlock->bbFlags &= ~BBF_DONT_REMOVE;
comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block.
}
}
else
{
// At this point the default case has already been handled and we need to generate a jump
// table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both
// switch variants need the switch value so create the necessary LclVar node here.
GenTree* switchValue = comp->gtNewLclvNode(tempLclNum, tempLclType);
LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock);
switchBlockRange.InsertAtEnd(switchValue);
// Try generating a bit test based switch first,
// if that's not possible a jump table based switch will be generated.
if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue))
{
JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum);
#ifdef TARGET_64BIT
if (tempLclType != TYP_I_IMPL)
{
// SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL.
// Note that the switch value is unsigned so the cast should be unsigned as well.
switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL);
switchBlockRange.InsertAtEnd(switchValue);
}
#endif
GenTree* switchTable = comp->gtNewJmpTableNode();
GenTree* switchJump = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable);
switchBlockRange.InsertAfter(switchValue, switchTable, switchJump);
// this block no longer branches to the default block
afterDefaultCondBlock->bbJumpSwt->removeDefault();
}
comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock);
}
GenTree* next = node->gtNext;
// Get rid of the GT_SWITCH(temp).
switchBBRange.Remove(node->AsOp()->gtOp1);
switchBBRange.Remove(node);
return next;
}
//------------------------------------------------------------------------
// TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test.
//
// Arguments:
// jumpTable - The jump table
// jumpCount - The number of blocks in the jump table
// targetCount - The number of distinct blocks in the jump table
// bbSwitch - The switch block
// switchValue - A LclVar node that provides the switch value
//
// Return value:
// true if the switch has been lowered to a bit test
//
// Notes:
// If the jump table contains fewer than 32 (64 on 64 bit targets) entries and there
// are at most 2 distinct jump targets then the jump table can be converted to a word
// of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the
// other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump
// to the appropriate target:
// mov eax, 245 ; jump table converted to a "bit table"
// bt eax, ebx ; ebx is supposed to contain the switch value
// jc target1
// target0:
// ...
// target1:
// Such code is both shorter and faster (in part due to the removal of a memory load)
// than the traditional jump table based code. And of course, it also avoids the need
// to emit the jump table itself that can reach up to 256 bytes (for 64 entries).
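//
// For illustration (hypothetical jump table): with case targets { T0, T1, T0, T1 } and the
// default appended at the end, bitCount is 4, bbCase1 is T0 (the first entry) and the loop
// in the function body builds bitTable = 0b0101, i.e. a set bit at every index that jumps
// to T0; BT then sets the carry flag exactly for those indices.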
//
bool Lowering::TryLowerSwitchToBitTest(
BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue)
{
#ifndef TARGET_XARCH
// Other architectures may use this if they substitute GT_BT with equivalent code.
return false;
#else
assert(jumpCount >= 2);
assert(targetCount >= 2);
assert(bbSwitch->bbJumpKind == BBJ_SWITCH);
assert(switchValue->OperIs(GT_LCL_VAR));
//
// Quick check to see if it's worth going through the jump table. The bit test switch supports
// up to 2 targets but targetCount also includes the default block so we need to allow 3 targets.
// We'll ensure that there are only 2 targets when building the bit table.
//
if (targetCount > 3)
{
return false;
}
//
// The number of bits in the bit table is the same as the number of jump table entries. But the
// jump table also includes the default target (at the end) so we need to ignore it. The default
// has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates.
//
const unsigned bitCount = jumpCount - 1;
if (bitCount > (genTypeSize(TYP_I_IMPL) * 8))
{
return false;
}
//
// Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to
// bbCase1. Simply use the first block in the jump table as bbCase1, later we can invert the bit
// table and/or swap the blocks if it's beneficial.
//
BasicBlock* bbCase0 = nullptr;
BasicBlock* bbCase1 = jumpTable[0];
size_t bitTable = 1;
for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++)
{
if (jumpTable[bitIndex] == bbCase1)
{
bitTable |= (size_t(1) << bitIndex);
}
else if (bbCase0 == nullptr)
{
bbCase0 = jumpTable[bitIndex];
}
else if (jumpTable[bitIndex] != bbCase0)
{
// If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more
// than 3 because of the check at the start of the function.
assert(targetCount == 3);
return false;
}
}
//
// One of the case blocks has to follow the switch block. This requirement could be avoided
// by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively
// impacts register allocation.
//
if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
{
return false;
}
#ifdef TARGET_64BIT
//
// See if we can avoid an 8 byte immediate on 64 bit targets. If all upper 32 bits are 1
// then inverting the bit table will make them 0 so that the table now fits in 32 bits.
// Note that this does not change the number of bits in the bit table; it just takes
// advantage of the fact that loading a 32 bit immediate into a 64 bit register zero
// extends the immediate value to 64 bits.
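// For illustration (hypothetical): with a 64 entry table whose upper 32 entries all map to
// bbCase1, the high word of bitTable is all ones; inverting it yields a value that fits in
// 32 bits, and swapping bbCase0/bbCase1 preserves the branch semantics.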
//
if (~bitTable <= UINT32_MAX)
{
bitTable = ~bitTable;
std::swap(bbCase0, bbCase1);
}
#endif
//
// Rewire the blocks as needed and figure out the condition to use for JCC.
//
GenCondition bbSwitchCondition;
bbSwitch->bbJumpKind = BBJ_COND;
comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
if (bbSwitch->bbNext == bbCase0)
{
// GenCondition::C generates JC so we jump to bbCase1 when the bit is set
bbSwitchCondition = GenCondition::C;
bbSwitch->bbJumpDest = bbCase1;
comp->fgAddRefPred(bbCase0, bbSwitch);
comp->fgAddRefPred(bbCase1, bbSwitch);
}
else
{
assert(bbSwitch->bbNext == bbCase1);
// GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
bbSwitchCondition = GenCondition::NC;
bbSwitch->bbJumpDest = bbCase0;
comp->fgAddRefPred(bbCase0, bbSwitch);
comp->fgAddRefPred(bbCase1, bbSwitch);
}
//
// Append BT(bitTable, switchValue) and JCC(condition) to the switch block.
//
var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
GenTree* bitTest = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue);
bitTest->gtFlags |= GTF_SET_FLAGS;
GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition);
jcc->gtFlags |= GTF_USE_FLAGS;
LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc);
return true;
#endif // TARGET_XARCH
}
// NOTE: this method deliberately does not update the call arg table. It must only
// be used by NewPutArg and LowerArg; these functions are responsible for updating
// the call arg table as necessary.
void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast)
{
assert(argSlot != nullptr);
assert(*argSlot != nullptr);
assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST));
GenTree* arg = *argSlot;
// Replace the argument with the putarg/copy
*argSlot = putArgOrBitcast;
putArgOrBitcast->AsOp()->gtOp1 = arg;
// Insert the putarg/copy into the block
BlockRange().InsertAfter(arg, putArgOrBitcast);
}
//------------------------------------------------------------------------
// NewPutArg: rewrites the tree to put an arg in a register or on the stack.
//
// Arguments:
// call - the call whose arg is being rewritten.
// arg - the arg being rewritten.
// info - the fgArgTabEntry information for the argument.
// type - the type of the argument.
//
// Return Value:
// The new tree that was created to put the arg in the right place
// or the incoming arg if the arg tree was not rewritten.
//
// Assumptions:
// call, arg, and info must be non-null.
//
// Notes:
// For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
// this method allocates a single GT_PUTARG_REG for 1 eightbyte structs and a GT_FIELD_LIST of two GT_PUTARG_REGs
// for two eightbyte structs.
//
// For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
// (i.e. UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers
// layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
// (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
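//
// For illustration (hypothetical struct, System V x64 only): a 16 byte struct classified as one
// INTEGER and one SSE eightbyte is lowered to a GT_FIELD_LIST whose two fields are wrapped in
// GT_PUTARG_REG nodes, one targeting an integer register and one a floating point register.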
//
GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type)
{
assert(call != nullptr);
assert(arg != nullptr);
assert(info != nullptr);
GenTree* putArg = nullptr;
bool isOnStack = (info->GetRegNum() == REG_STK);
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
// Mark contained when we pass struct
// GT_FIELD_LIST is always marked contained when it is generated
if (type == TYP_STRUCT)
{
arg->SetContained();
if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR))
{
MakeSrcContained(arg, arg->AsObj()->Addr());
}
}
#endif
#if FEATURE_ARG_SPLIT
// Struct can be split into register(s) and stack on ARM
if (compFeatureArgSplit() && info->IsSplit())
{
assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST);
// TODO: Need to check correctness for FastTailCall
if (call->IsFastTailCall())
{
#ifdef TARGET_ARM
NYI_ARM("lower: struct argument by fast tail call");
#endif // TARGET_ARM
}
const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE;
DEBUG_ARG_SLOTS_ASSERT(slotNumber == info->slotNum);
const bool putInIncomingArgArea = call->IsFastTailCall();
putArg = new (comp, GT_PUTARG_SPLIT)
GenTreePutArgSplit(arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(),
#endif
info->numRegs, call, putInIncomingArgArea);
// If the struct argument is morphed to GT_FIELD_LIST node(s),
// we can derive the GC info from the type of each GT_FIELD_LIST node,
// so we skip setting GC pointer info here.
//
GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit();
for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++)
{
argSplit->SetRegNumByIdx(info->GetRegNum(regIndex), regIndex);
}
if (arg->OperGet() == GT_OBJ)
{
ClassLayout* layout = arg->AsObj()->GetLayout();
// Set type of registers
for (unsigned index = 0; index < info->numRegs; index++)
{
argSplit->m_regType[index] = layout->GetGCPtrType(index);
}
}
else
{
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
{
if (regIndex >= info->numRegs)
{
break;
}
var_types regType = use.GetNode()->TypeGet();
// Account for the possibility that float fields may be passed in integer registers.
if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(regIndex)))
{
regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
}
argSplit->m_regType[regIndex] = regType;
regIndex++;
}
// Clear the register assignment on the fieldList node, as these are contained.
arg->SetRegNum(REG_NA);
}
}
else
#endif // FEATURE_ARG_SPLIT
{
if (!isOnStack)
{
#if FEATURE_MULTIREG_ARGS
if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
{
unsigned int regIndex = 0;
for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
{
regNumber argReg = info->GetRegNum(regIndex);
GenTree* curOp = use.GetNode();
var_types curTyp = curOp->TypeGet();
// Create a new GT_PUTARG_REG node with op1
GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg);
// Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST
ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), newOper);
regIndex++;
}
// Just return arg. The GT_FIELD_LIST is not replaced.
// Nothing more to do.
return arg;
}
else
#endif // FEATURE_MULTIREG_ARGS
{
putArg = comp->gtNewPutArgReg(type, arg, info->GetRegNum());
}
}
else
{
// Mark this one as tail call arg if it is a fast tail call.
// This provides the info to put this argument in the incoming arg area slot
// instead of in the outgoing arg area slot.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
// a result. So the type of its operand must be the correct type to push on the stack.
// For a FIELD_LIST, this will be the type of the field (not the type of the arg),
// but otherwise it is generally the type of the operand.
info->checkIsStruct();
#endif
if ((arg->OperGet() != GT_FIELD_LIST))
{
#if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
if (type == TYP_SIMD12)
{
#if !defined(TARGET_64BIT)
assert(info->GetByteSize() == 12);
#else // TARGET_64BIT
if (compMacOsArm64Abi())
{
assert(info->GetByteSize() == 12);
}
else
{
assert(info->GetByteSize() == 16);
}
#endif // TARGET_64BIT
}
else
#endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
{
assert(genActualType(arg->TypeGet()) == type);
}
}
const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE;
const bool putInIncomingArgArea = call->IsFastTailCall();
putArg = new (comp, GT_PUTARG_STK)
GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(),
#endif
call, putInIncomingArgArea);
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// If the ArgTabEntry indicates that this arg is a struct,
// get and store the number of slots that are references.
// This is later used in the codegen of the PUT_ARG_STK implementation
// for structs to decide whether, and how many, single eight-byte copies
// need to be done (only for reference slots), so that gcinfo is emitted.
// For non-reference slots, faster/smaller instructions are used:
// pair copying using XMM registers or rep mov instructions.
if (info->isStruct)
{
// We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments.
if (arg->OperIsLocal())
{
// This must have a type with a known size (SIMD or has been morphed to a primitive type).
assert(arg->TypeGet() != TYP_STRUCT);
}
else if (arg->OperIs(GT_OBJ))
{
assert(!varTypeIsSIMD(arg));
#ifdef TARGET_X86
// On x86 the VM lies about the type of a struct containing a pointer sized
// integer field by returning the type of its field as the type of the struct.
// Such a struct can be passed in a register depending on its position in the
// parameter list. The VM does this unwrapping only one level deep and therefore
// a type like Struct Foo { Struct Bar { int f } } always needs to be
// passed on the stack. Also, the VM doesn't lie about the type of such a struct
// when it is a field of another struct. That is, the VM doesn't lie about
// the type of Foo.Bar.
//
// We now support the promotion of fields that are of type struct.
// However, we only support the limited case where the struct field has a
// single field, and that single field must be a scalar type. Say the Foo.Bar
// field is being passed as a parameter to a call. Since it is a TYP_STRUCT,
// per the x86 ABI it should always be passed on the stack. Therefore the GenTree
// node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where
// local v1 could be a promoted field standing for Foo.Bar. Note that
// the type of v1 will be the type of the field Foo.Bar.f when Foo is
// promoted. That is, v1 will be a scalar type. In this case we need to
// pass v1 on the stack instead of in a register.
//
// TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is
// a scalar type and the width of the GT_OBJ matches the type size of v1.
// Note that this cannot be done until call node arguments are morphed,
// because we should not lose the fact that the type of the argument is
// a struct, so that the arg gets correctly marked to be passed on the stack.
GenTree* objOp1 = arg->gtGetOp1();
if (objOp1->OperGet() == GT_LCL_VAR_ADDR)
{
unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum();
if (comp->lvaTable[lclNum].lvType != TYP_STRUCT)
{
comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));
}
}
#endif // TARGET_X86
}
else if (!arg->OperIs(GT_FIELD_LIST))
{
#ifdef TARGET_ARM
assert((info->GetStackSlotsNumber() == 1) ||
((arg->TypeGet() == TYP_DOUBLE) && (info->GetStackSlotsNumber() == 2)));
#else
assert(varTypeIsSIMD(arg) || (info->GetStackSlotsNumber() == 1));
#endif
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
}
}
JITDUMP("new node is : ");
DISPNODE(putArg);
JITDUMP("\n");
if (arg->gtFlags & GTF_LATE_ARG)
{
putArg->gtFlags |= GTF_LATE_ARG;
}
return putArg;
}
//------------------------------------------------------------------------
// LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between
// the argument evaluation and the call. This is the point at which the source is
// consumed and the value transitions from control of the register allocator to the calling
// convention.
//
// Arguments:
// call - The call node
// ppArg - Pointer to the call argument pointer. We might replace the call argument by
// changing *ppArg.
//
// Return Value:
// None.
//
void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
{
GenTree* arg = *ppArg;
JITDUMP("lowering arg : ");
DISPNODE(arg);
// No assignments should remain by Lowering.
assert(!arg->OperIs(GT_ASG));
assert(!arg->OperIsPutArgStk());
// Assignments/stores at this level are not really placing an argument.
// They are setting up temporary locals that will later be placed into
// outgoing regs or stack.
// Note that atomic ops may be stores and still produce a value.
if (!arg->IsValue())
{
assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
arg->OperIsCopyBlkOp());
return;
}
fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg);
assert(info->GetNode() == arg);
var_types type = arg->TypeGet();
if (varTypeIsSmall(type))
{
// Normalize 'type', it represents the item that we will be storing in the Outgoing Args
type = TYP_INT;
}
#if defined(FEATURE_SIMD)
#if defined(TARGET_X86)
// Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their
// allocated size (see lvSize()). However, when passing the variables as arguments, and
// storing the variables to the outgoing argument area on the stack, we must use their
// actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written.
if (type == TYP_SIMD16)
{
if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR))
{
const LclVarDsc* varDsc = comp->lvaGetDesc(arg->AsLclVarCommon());
type = varDsc->lvType;
}
else if (arg->OperIs(GT_SIMD, GT_HWINTRINSIC))
{
GenTreeJitIntrinsic* jitIntrinsic = reinterpret_cast<GenTreeJitIntrinsic*>(arg);
// For HWIntrinsic, there are some intrinsics like ExtractVector128 which have
// a gtType of TYP_SIMD16 but a SimdSize of 32, so we need to include that in
// the assert below.
assert((jitIntrinsic->GetSimdSize() == 12) || (jitIntrinsic->GetSimdSize() == 16) ||
(jitIntrinsic->GetSimdSize() == 32));
if (jitIntrinsic->GetSimdSize() == 12)
{
type = TYP_SIMD12;
}
}
}
#elif defined(TARGET_AMD64)
// TYP_SIMD8 parameters that are passed as longs
if (type == TYP_SIMD8 && genIsValidIntReg(info->GetRegNum()))
{
GenTree* bitcast = comp->gtNewBitCastNode(TYP_LONG, arg);
BlockRange().InsertAfter(arg, bitcast);
*ppArg = arg = bitcast;
assert(info->GetNode() == arg);
type = TYP_LONG;
}
#endif // defined(TARGET_X86)
#endif // defined(FEATURE_SIMD)
// If we hit this we are probably double-lowering.
assert(!arg->OperIsPutArg());
#if !defined(TARGET_64BIT)
if (varTypeIsLong(type))
{
noway_assert(arg->OperIs(GT_LONG));
GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList();
fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp1(), 0, TYP_INT);
fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp2(), 4, TYP_INT);
GenTree* newArg = NewPutArg(call, fieldList, info, type);
if (info->GetRegNum() != REG_STK)
{
assert(info->numRegs == 2);
// In the register argument case, NewPutArg replaces the original field list args with new
// GT_PUTARG_REG nodes, inserts them in linear order and returns the field list. So the
// only thing left to do is to insert the field list itself in linear order.
assert(newArg == fieldList);
BlockRange().InsertBefore(arg, newArg);
}
else
{
// For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK.
// Although the hi argument needs to be pushed first, that will be handled by the general case,
// in which the fields will be reversed.
assert(info->numSlots == 2);
newArg->SetRegNum(REG_STK);
BlockRange().InsertBefore(arg, fieldList, newArg);
}
*ppArg = newArg;
assert(info->GetNode() == newArg);
BlockRange().Remove(arg);
}
else
#endif // !defined(TARGET_64BIT)
{
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
if (call->IsVarargs() || comp->opts.compUseSoftFP)
{
// For a vararg call or on armel, reg args should all be integer.
// Insert copies as needed to move float values to integer registers.
GenTree* newNode = LowerFloatArg(ppArg, info);
if (newNode != nullptr)
{
type = newNode->TypeGet();
}
}
#endif // TARGET_ARMARCH || TARGET_LOONGARCH64
GenTree* putArg = NewPutArg(call, arg, info, type);
// In the case of a register-passable struct (in one or two registers),
// NewPutArg returns a new node (a GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs).
// If an extra node is returned, splice it in the right place in the tree.
if (arg != putArg)
{
ReplaceArgWithPutArgOrBitcast(ppArg, putArg);
}
}
}
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
//------------------------------------------------------------------------
// LowerFloatArg: Lower float call arguments on the arm/LoongArch64 platform.
//
// Arguments:
// arg - The arg node
// info - call argument info
//
// Return Value:
// Return nullptr if no transformation was done;
// return arg if an in-place transformation was done;
// return a new tree if the root was changed.
//
// Notes:
// This must handle scalar float arguments as well as GT_FIELD_LISTs
// with floating point fields.
//
GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info)
{
GenTree* arg = *pArg;
if (info->GetRegNum() != REG_STK)
{
if (arg->OperIs(GT_FIELD_LIST))
{
// Transform fields that are passed as registers in place.
regNumber currRegNumber = info->GetRegNum();
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
{
if (regIndex >= info->numRegs)
{
break;
}
GenTree* node = use.GetNode();
if (varTypeIsFloating(node))
{
GenTree* intNode = LowerFloatArgReg(node, currRegNumber);
assert(intNode != nullptr);
ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), intNode);
}
if (node->TypeGet() == TYP_DOUBLE)
{
currRegNumber = REG_NEXT(REG_NEXT(currRegNumber));
regIndex += 2;
}
else
{
currRegNumber = REG_NEXT(currRegNumber);
regIndex += 1;
}
}
// List fields were replaced in place.
return arg;
}
else if (varTypeIsFloating(arg))
{
GenTree* intNode = LowerFloatArgReg(arg, info->GetRegNum());
assert(intNode != nullptr);
ReplaceArgWithPutArgOrBitcast(pArg, intNode);
return *pArg;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// LowerFloatArgReg: Lower the float call argument node that is passed via register.
//
// Arguments:
// arg - The arg node
// regNum - register number
//
// Return Value:
// Return a new bitcast node that moves the float value to an integer register.
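//
// Notes:
// For illustration (hypothetical registers): a TYP_FLOAT arg destined for an integer register
// becomes BITCAST<TYP_INT>(arg) assigned to regNum; on arm32 softFP a TYP_DOUBLE becomes
// BITCAST<TYP_LONG>(arg) occupying regNum and REG_NEXT(regNum).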
//
GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum)
{
var_types floatType = arg->TypeGet();
assert(varTypeIsFloating(floatType));
var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
GenTree* intArg = comp->gtNewBitCastNode(intType, arg);
intArg->SetRegNum(regNum);
#ifdef TARGET_ARM
if (floatType == TYP_DOUBLE)
{
// A special case when we introduce TYP_LONG
// during lowering for arm32 softFP to pass double
// in int registers.
assert(comp->opts.compUseSoftFP);
regNumber nextReg = REG_NEXT(regNum);
intArg->AsMultiRegOp()->gtOtherReg = nextReg;
}
#endif
return intArg;
}
#endif
// do lowering steps for each arg of a call
void Lowering::LowerArgsForCall(GenTreeCall* call)
{
JITDUMP("objp:\n======\n");
if (call->gtCallThisArg != nullptr)
{
LowerArg(call, &call->gtCallThisArg->NodeRef());
}
JITDUMP("\nargs:\n======\n");
for (GenTreeCall::Use& use : call->Args())
{
LowerArg(call, &use.NodeRef());
}
JITDUMP("\nlate:\n======\n");
for (GenTreeCall::Use& use : call->LateArgs())
{
LowerArg(call, &use.NodeRef());
}
}
// helper that creates a node representing a relocatable physical address computation
GenTree* Lowering::AddrGen(ssize_t addr)
{
// this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
return result;
}
// variant that takes a void*
GenTree* Lowering::AddrGen(void* addr)
{
return AddrGen((ssize_t)addr);
}
// do lowering steps for a call
// this includes:
// - adding the placement nodes (either stack or register variety) for arguments
// - lowering the expression that calculates the target address
// - adding nodes for other operations that occur after the call sequence starts and before
// control transfer occurs (profiling and tail call helpers, pinvoke incantations)
//
void Lowering::LowerCall(GenTree* node)
{
GenTreeCall* call = node->AsCall();
JITDUMP("lowering call (before):\n");
DISPTREERANGE(BlockRange(), call);
JITDUMP("\n");
call->ClearOtherRegs();
LowerArgsForCall(call);
// note that everything generated from this point might run AFTER the outgoing args are placed
GenTree* controlExpr = nullptr;
bool callWasExpandedEarly = false;
// for x86, this is where we record ESP for checking later to make sure stack is balanced
// Check for Delegate.Invoke(). If so, we inline it. We get the
// target-object and target-function from the delegate-object, and do
// an indirect call.
if (call->IsDelegateInvoke())
{
controlExpr = LowerDelegateInvoke(call);
}
else
{
// Virtual and interface calls
switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
{
case GTF_CALL_VIRT_STUB:
controlExpr = LowerVirtualStubCall(call);
break;
case GTF_CALL_VIRT_VTABLE:
assert(call->IsVirtualVtable());
if (!call->IsExpandedEarly())
{
assert(call->gtControlExpr == nullptr);
controlExpr = LowerVirtualVtableCall(call);
}
else
{
callWasExpandedEarly = true;
controlExpr = call->gtControlExpr;
}
break;
case GTF_CALL_NONVIRT:
if (call->IsUnmanaged())
{
controlExpr = LowerNonvirtPinvokeCall(call);
}
else if (call->gtCallType == CT_INDIRECT)
{
controlExpr = LowerIndirectNonvirtCall(call);
}
else
{
controlExpr = LowerDirectCall(call);
}
break;
default:
noway_assert(!"strange call type");
break;
}
}
// Indirect calls should always go through GenTreeCall::gtCallAddr and
// should never have a control expression as well.
assert((call->gtCallType != CT_INDIRECT) || (controlExpr == nullptr));
if (call->IsTailCallViaJitHelper())
{
// Either controlExpr or gtCallAddr must contain real call target.
if (controlExpr == nullptr)
{
assert(call->gtCallType == CT_INDIRECT);
assert(call->gtCallAddr != nullptr);
controlExpr = call->gtCallAddr;
}
controlExpr = LowerTailCallViaJitHelper(call, controlExpr);
}
// Check if we need to thread a newly created controlExpr into the LIR
//
if ((controlExpr != nullptr) && !callWasExpandedEarly)
{
LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr);
JITDUMP("results of lowering call:\n");
DISPRANGE(controlExprRange);
ContainCheckRange(controlExprRange);
BlockRange().InsertBefore(call, std::move(controlExprRange));
call->gtControlExpr = controlExpr;
}
if (comp->opts.IsCFGEnabled())
{
LowerCFGCall(call);
}
if (call->IsFastTailCall())
{
// Lowering a fast tail call can introduce new temps to set up args correctly for the callee.
// This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding caller stack args
// and replacing them with a new temp. The control expression can also contain nodes that need
// to be patched.
// Therefore lowering the fast tail call must be done after controlExpr is inserted into LIR.
// One side effect is that the order of the PInvoke method epilog (PME) and the control expression
// is flipped, since LowerFastTailCall calls InsertPInvokeMethodEpilog.
LowerFastTailCall(call);
}
if (varTypeIsStruct(call))
{
LowerCallStruct(call);
}
ContainCheckCallOperands(call);
JITDUMP("lowering call (after):\n");
DISPTREERANGE(BlockRange(), call);
JITDUMP("\n");
}
// Inserts profiler hook, GT_PROF_HOOK for a tail call node.
//
// AMD64:
// We need to insert this after all nested calls, but before all the arguments to this call have been set up.
// To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before
// that. If there are no args, then it should be inserted before the call node.
//
// For example:
// * stmtExpr void (top level) (IL 0x000...0x010)
// arg0 SETUP | /--* argPlace ref REG NA $c5
// this in rcx | | /--* argPlace ref REG NA $c1
// | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2
// arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2
// | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2
// arg1 in rdx | | +--* putarg_reg ref REG NA
// | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80
// this in rcx | | +--* putarg_reg ref REG NA
// | | /--* call nullcheck ref System.String.ToLower $c5
// | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? )
// | | { \--* prof_hook void REG NA
// arg0 in rcx | +--* putarg_reg ref REG NA
// control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA
// \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void
//
// In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call
// (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call.
//
// X86:
// Insert the profiler hook immediately before the call. The profiler hook will preserve
// all argument registers (ECX, EDX), but nothing else.
//
// Params:
// callNode - tail call node
// insertionPoint - if non-null, insert the profiler hook before this point.
// If null, insert the profiler hook before args are set up
// but after all arg side effects are computed.
//
void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint)
{
assert(call->IsTailCall());
assert(comp->compIsProfilerHookNeeded());
#if defined(TARGET_X86)
if (insertionPoint == nullptr)
{
insertionPoint = call;
}
#else // !defined(TARGET_X86)
if (insertionPoint == nullptr)
{
for (GenTreeCall::Use& use : call->Args())
{
assert(!use.GetNode()->OperIs(GT_PUTARG_REG)); // We don't expect to see these in gtCallArgs
if (use.GetNode()->OperIs(GT_PUTARG_STK))
{
// found it
insertionPoint = use.GetNode();
break;
}
}
if (insertionPoint == nullptr)
{
for (GenTreeCall::Use& use : call->LateArgs())
{
if (use.GetNode()->OperIs(GT_PUTARG_REG, GT_PUTARG_STK))
{
// found it
insertionPoint = use.GetNode();
break;
}
}
// If there are no args, insert before the call node
if (insertionPoint == nullptr)
{
insertionPoint = call;
}
}
}
#endif // !defined(TARGET_X86)
assert(insertionPoint != nullptr);
GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
BlockRange().InsertBefore(insertionPoint, profHookNode);
}
//------------------------------------------------------------------------
// LowerFastTailCall: Lower a call node dispatched as a fast tailcall (epilog +
// jmp).
//
// Arguments:
// call - the call node that is being dispatched as a fast tailcall.
//
// Assumptions:
// call must be non-null.
//
// Notes:
// For fast tail calls it is necessary to set up stack args in the incoming
// arg stack space area. When args passed also come from this area we may
// run into problems because we may end up overwriting the stack slot before
// using it. For example, for foo(a, b) { return bar(b, a); }, if a and b
// are on incoming arg stack space in foo they need to be swapped in this
// area for the call to bar. This function detects this situation and
// introduces a temp when an outgoing argument would overwrite a later-used
// incoming argument.
//
// This function also handles inserting necessary profiler hooks and pinvoke
// method epilogs in case there are inlined pinvokes.
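//
// For illustration (hypothetical offsets): if an incoming stack arg occupies bytes 0..8 of the
// incoming arg area and a PUTARG_STK for the tail call writes bytes 4..12, the ranges overlap;
// any remaining uses of that incoming arg are then redirected to a temp whose copy is inserted
// before the outgoing args are set up (see RehomeArgForFastTailCall).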
void Lowering::LowerFastTailCall(GenTreeCall* call)
{
#if FEATURE_FASTTAILCALL
// Tail call restrictions, i.e., conditions under which the tail prefix is ignored.
// Most of these checks are already done by importer or fgMorphTailCall().
// This serves as a double sanity check.
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
assert(!comp->opts.IsReversePInvoke()); // tail calls reverse pinvoke
assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
#ifdef TARGET_AMD64
assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
#endif // TARGET_AMD64
// We expect to see a call that meets the following conditions
assert(call->IsFastTailCall());
// VM cannot use return address hijacking when A() and B() tail call each
// other in mutual recursion. Therefore, this block is reachable through
// a GC-safe point or the whole method is marked as fully interruptible.
//
// TODO-Cleanup:
// optReachWithoutCall() depends on the fact that loop header blocks
// will have a block number > fgLastBB. These loop headers get added
// after dominator computation and get skipped by optReachWithoutCall().
// The below condition cannot be asserted in lower because fgSimpleLowering()
// can add a new basic block for range check failure which becomes
// fgLastBB with block number > loop header block number.
// assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
// !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->GetInterruptible());
// If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
// a method returns. This is a case of caller method has both PInvokes and tail calls.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
}
// Args for a tail call are set up in the incoming arg area. The gc-ness of the args of the
// caller and the callee (which is being tail called) may not match. Therefore, everything
// from arg setup until the epilog needs to be non-interruptible by GC. This is
// achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node
// of the call is set up. Note that once a stack arg is set up, it cannot have nested
// calls subsequently in execution order that set up other args, because the nested
// call could overwrite the stack arg that was set up earlier.
ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack));
for (GenTreeCall::Use& use : call->Args())
{
if (use.GetNode()->OperIs(GT_PUTARG_STK))
{
putargs.Push(use.GetNode());
}
}
for (GenTreeCall::Use& use : call->LateArgs())
{
if (use.GetNode()->OperIs(GT_PUTARG_STK))
{
putargs.Push(use.GetNode());
}
}
GenTree* startNonGCNode = nullptr;
if (!putargs.Empty())
{
// Get the earliest operand of the first PUTARG_STK node. We will make
// the required copies of args before this node.
bool unused;
GenTree* insertionPoint = BlockRange().GetTreeRange(putargs.Bottom(), &unused).FirstNode();
// Insert GT_START_NONGC node before we evaluate the PUTARG_STK args.
// Note that if there are no args to be setup on stack, no need to
// insert GT_START_NONGC node.
startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
BlockRange().InsertBefore(insertionPoint, startNonGCNode);
// Gc-interruptability in the following case:
// foo(a, b, c, d, e) { bar(a, b, c, d, e); }
// bar(a, b, c, d, e) { foo(a, b, d, d, e); }
//
// Since the instruction group starting from the instruction that sets up first
// stack arg to the end of the tail call is marked as non-gc interruptible,
// this will form a non-interruptible tight loop causing gc-starvation. To fix
// this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method
// has a single basic block and is not a GC-safe point. The presence of a single
// nop outside non-gc interruptible region will prevent gc starvation.
if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT))
{
assert(comp->fgFirstBB == comp->compCurBB);
GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
BlockRange().InsertBefore(startNonGCNode, noOp);
}
// Since this is a fast tailcall each PUTARG_STK will place the argument in the
// _incoming_ arg space area. This will effectively overwrite our already existing
// incoming args that live in that area. If we have later uses of those args, this
// is a problem. We introduce a defensive copy into a temp here of those args that
// potentially may cause problems.
for (int i = 0; i < putargs.Height(); i++)
{
GenTreePutArgStk* put = putargs.Bottom(i)->AsPutArgStk();
unsigned int overwrittenStart = put->getArgOffset();
unsigned int overwrittenEnd = overwrittenStart + put->GetStackByteSize();
int baseOff = -1; // Stack offset of first arg on stack
for (unsigned callerArgLclNum = 0; callerArgLclNum < comp->info.compArgsCount; callerArgLclNum++)
{
LclVarDsc* callerArgDsc = comp->lvaGetDesc(callerArgLclNum);
if (callerArgDsc->lvIsRegArg)
{
continue;
}
unsigned int argStart;
unsigned int argEnd;
#if defined(TARGET_AMD64)
if (TargetOS::IsWindows)
{
// On Windows x64, the argument position determines the stack slot uniquely, and even the
// register args take up space in the stack frame (shadow space).
argStart = callerArgLclNum * TARGET_POINTER_SIZE;
argEnd = argStart + static_cast<unsigned int>(callerArgDsc->lvArgStackSize());
}
else
#endif // TARGET_AMD64
{
assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS);
if (baseOff == -1)
{
baseOff = callerArgDsc->GetStackOffset();
}
// On all ABIs where we fast tail call the stack args should come in order.
assert(baseOff <= callerArgDsc->GetStackOffset());
// Compute offset of this stack argument relative to the first stack arg.
// This will be its offset into the incoming arg space area.
argStart = static_cast<unsigned int>(callerArgDsc->GetStackOffset() - baseOff);
argEnd = argStart + comp->lvaLclSize(callerArgLclNum);
}
// If ranges do not overlap then this PUTARG_STK will not mess up the arg.
if ((overwrittenEnd <= argStart) || (overwrittenStart >= argEnd))
{
continue;
}
// Codegen cannot handle a partially overlapping copy. For
// example, if we have
// bar(S16 stack, S32 stack2)
// foo(S32 stack, S32 stack2) { bar(..., stack) }
// then we may end up having to move 'stack' in foo 16 bytes
// ahead. It is possible that this PUTARG_STK is the only use,
// in which case we will need to introduce a temp, so look for
// uses starting from it. Note that we assume that in-place
// copies are OK.
GenTree* lookForUsesFrom = put->gtNext;
if (overwrittenStart != argStart)
{
lookForUsesFrom = insertionPoint;
}
RehomeArgForFastTailCall(callerArgLclNum, insertionPoint, lookForUsesFrom, call);
// The above call can introduce temps and invalidate the pointer.
callerArgDsc = comp->lvaGetDesc(callerArgLclNum);
// For promoted locals we have more work to do as its fields could also have been invalidated.
if (!callerArgDsc->lvPromoted)
{
continue;
}
unsigned int fieldsFirst = callerArgDsc->lvFieldLclStart;
unsigned int fieldsEnd = fieldsFirst + callerArgDsc->lvFieldCnt;
for (unsigned int j = fieldsFirst; j < fieldsEnd; j++)
{
RehomeArgForFastTailCall(j, insertionPoint, lookForUsesFrom, call);
}
}
}
}
// Insert a GT_PROF_HOOK node to emit the profiler tail call hook. This should be
// inserted before the args are set up but after the side effects of the args are
// computed. That is, the GT_PROF_HOOK node needs to be inserted before the GT_START_NONGC
// node, if one exists.
if (comp->compIsProfilerHookNeeded())
{
InsertProfTailCallHook(call, startNonGCNode);
}
#else // !FEATURE_FASTTAILCALL
// Platform does not implement fast tail call mechanism. This cannot be
// reached because we always choose to do a tailcall via helper on those
// platforms (or no tailcall at all).
unreached();
#endif
}
//
//------------------------------------------------------------------------
// RehomeArgForFastTailCall: Introduce temps for args that may be overwritten
// during fast tailcall sequence.
//
// Arguments:
// lclNum - the lcl num of the arg that will be overwritten.
// insertTempBefore - the node at which to copy the arg into a temp.
// lookForUsesStart - the node where to start scanning and replacing uses of
// the arg specified by lclNum.
// callNode - the call node that is being dispatched as a fast tailcall.
//
// Assumptions:
// all args must be non-null.
//
// Notes:
// This function scans for uses of the arg specified by lclNum starting
// from the lookForUsesStart node. If it finds any uses it introduces a temp
// for this argument and updates uses to use this instead. In the situation
// where it introduces a temp it can thus invalidate pointers to other
// locals.
//
void Lowering::RehomeArgForFastTailCall(unsigned int lclNum,
GenTree* insertTempBefore,
GenTree* lookForUsesStart,
GenTreeCall* callNode)
{
unsigned int tmpLclNum = BAD_VAR_NUM;
for (GenTree* treeNode = lookForUsesStart; treeNode != callNode; treeNode = treeNode->gtNext)
{
if (!treeNode->OperIsLocal() && !treeNode->OperIsLocalAddr())
{
continue;
}
GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon();
if (lcl->GetLclNum() != lclNum)
{
continue;
}
// Create tmp and use it in place of callerArgDsc
if (tmpLclNum == BAD_VAR_NUM)
{
tmpLclNum = comp->lvaGrabTemp(true DEBUGARG("Fast tail call lowering is creating a new local variable"));
LclVarDsc* callerArgDsc = comp->lvaGetDesc(lclNum);
var_types tmpTyp = genActualType(callerArgDsc->TypeGet());
comp->lvaTable[tmpLclNum].lvType = tmpTyp;
// TODO-CQ: I don't see why we should copy doNotEnreg.
comp->lvaTable[tmpLclNum].lvDoNotEnregister = callerArgDsc->lvDoNotEnregister;
#ifdef DEBUG
comp->lvaTable[tmpLclNum].SetDoNotEnregReason(callerArgDsc->GetDoNotEnregReason());
#endif // DEBUG
GenTree* value;
#ifdef TARGET_ARM
if (tmpTyp == TYP_LONG)
{
GenTree* loResult = comp->gtNewLclFldNode(lclNum, TYP_INT, 0);
GenTree* hiResult = comp->gtNewLclFldNode(lclNum, TYP_INT, 4);
value = new (comp, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loResult, hiResult);
}
else
#endif // TARGET_ARM
{
value = comp->gtNewLclvNode(lclNum, tmpTyp);
}
if (tmpTyp == TYP_STRUCT)
{
comp->lvaSetStruct(tmpLclNum, comp->lvaGetStruct(lclNum), false);
}
GenTreeLclVar* storeLclVar = comp->gtNewStoreLclVar(tmpLclNum, value);
BlockRange().InsertBefore(insertTempBefore, LIR::SeqTree(comp, storeLclVar));
ContainCheckRange(value, storeLclVar);
LowerNode(storeLclVar);
}
lcl->SetLclNum(tmpLclNum);
}
}
//------------------------------------------------------------------------
// LowerTailCallViaJitHelper: lower a call via the tailcall JIT helper. Morph
// has already inserted tailcall helper special arguments. This function inserts
// actual data for some placeholders. This function is only used on x86.
//
// Lower
// tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg)
// as
// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
// callTarget)
// Note that the special arguments are on the stack, whereas the function arguments follow the normal convention.
//
// Also inserts PInvoke method epilog if required.
//
// Arguments:
// call - The call node
// callTarget - The real call target. This is used to replace the dummyArg during lowering.
//
// Return Value:
// Returns control expression tree for making a call to helper Jit_TailCall.
//
GenTree* Lowering::LowerTailCallViaJitHelper(GenTreeCall* call, GenTree* callTarget)
{
// Tail call restrictions, i.e., conditions under which the tail prefix is ignored.
// Most of these checks are already done by importer or fgMorphTailCall().
// This serves as a double sanity check.
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
// We expect to see a call that meets the following conditions
assert(call->IsTailCallViaJitHelper());
assert(callTarget != nullptr);
// The TailCall helper call never returns to the caller and is not GC interruptible.
// Therefore the block containing the tail call should be a GC safe point to avoid
// GC starvation. It is legal for the block to be unmarked iff the entry block is a
// GC safe point, as the entry block trivially dominates every reachable block.
assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT));
// If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
// a method returns. This is a case of caller method has both PInvokes and tail calls.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
}
// Remove gtCallAddr from execution order if present.
if (call->gtCallType == CT_INDIRECT)
{
assert(call->gtCallAddr != nullptr);
bool isClosed;
LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed);
assert(isClosed);
BlockRange().Remove(std::move(callAddrRange));
}
// The callTarget tree needs to be sequenced.
LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget);
// Verify the special args are what we expect, and replace the dummy args with real values.
// We need to figure out the size of the outgoing stack arguments, not including the special args.
// The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes.
// This number is exactly the next slot number in the call's argument info struct.
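// For illustration (hypothetical sizes): if GetNextSlotByteOffset() returns 24 bytes, that is 6
// words; 4 of them are the special args (callTarget, flags, new/old stack arg word counts), so
// nNewStkArgsWords ends up as 2 actual outgoing stack argument words.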
unsigned nNewStkArgsBytes = call->fgArgInfo->GetNextSlotByteOffset();
const int wordSize = 4;
unsigned nNewStkArgsWords = nNewStkArgsBytes / wordSize;
DEBUG_ARG_SLOTS_ASSERT(call->fgArgInfo->GetNextSlotNum() == nNewStkArgsWords);
assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args.
nNewStkArgsWords -= 4;
unsigned numArgs = call->fgArgInfo->ArgCount();
fgArgTabEntry* argEntry;
// arg 0 == callTarget.
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1);
assert(argEntry != nullptr);
GenTree* arg0 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
ContainCheckRange(callTargetRange);
BlockRange().InsertAfter(arg0, std::move(callTargetRange));
bool isClosed;
LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed);
assert(isClosed);
BlockRange().Remove(std::move(secondArgRange));
argEntry->GetNode()->AsPutArgStk()->gtOp1 = callTarget;
// arg 1 == flags
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2);
assert(argEntry != nullptr);
GenTree* arg1 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
assert(arg1->gtOper == GT_CNS_INT);
ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX
(call->IsVirtualStub() ? 0x2 : 0x0); // Stub dispatch flag
arg1->AsIntCon()->gtIconVal = tailCallHelperFlags;
// arg 2 == numberOfNewStackArgsWords
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3);
assert(argEntry != nullptr);
GenTree* arg2 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
assert(arg2->gtOper == GT_CNS_INT);
arg2->AsIntCon()->gtIconVal = nNewStkArgsWords;
#ifdef DEBUG
// arg 3 == numberOfOldStackArgsWords
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4);
assert(argEntry != nullptr);
GenTree* arg3 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
assert(arg3->gtOper == GT_CNS_INT);
#endif // DEBUG
// Transform this call node into a call to Jit tail call helper.
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL);
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
// Lower this as if it were a pure helper call.
call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
GenTree* result = LowerDirectCall(call);
// Now add back tail call flags for identifying this node as tail call dispatched via helper.
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER;
#ifdef PROFILING_SUPPORTED
// Insert profiler tail call hook if needed.
// Since we don't know the insertion point, pass null for second param.
if (comp->compIsProfilerHookNeeded())
{
InsertProfTailCallHook(call, nullptr);
}
#endif // PROFILING_SUPPORTED
return result;
}
//------------------------------------------------------------------------
// LowerCFGCall: Potentially lower a call to use control-flow guard. This
// expands indirect calls into either a validate+call sequence or to a dispatch
// helper taking the original target in a special register.
//
// Arguments:
// call - The call node
//
void Lowering::LowerCFGCall(GenTreeCall* call)
{
assert(!call->IsHelperCall(comp, CORINFO_HELP_DISPATCH_INDIRECT_CALL));
if (call->IsHelperCall(comp, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
return;
}
GenTree* callTarget = call->gtCallType == CT_INDIRECT ? call->gtCallAddr : call->gtControlExpr;
if ((callTarget == nullptr) || callTarget->IsIntegralConst())
{
// This is a direct call, no CFG check is necessary.
return;
}
CFGCallKind cfgKind = call->GetCFGCallKind();
switch (cfgKind)
{
case CFGCallKind::ValidateAndCall:
{
// To safely apply CFG we need to generate a very specific pattern:
// in particular, it is a safety issue to allow the JIT to reload
// the call target from memory between calling
// CORINFO_HELP_VALIDATE_INDIRECT_CALL and the target. This is
// something that would easily occur in debug codegen if we
// produced high-level IR. Instead we will use a GT_PHYSREG node
// to get the target back from the register that contains the target.
//
// Additionally, the validator does not preserve all arg registers,
// so we have to move all GT_PUTARG_REG nodes that would otherwise
// be trashed ahead. The JIT also has an internal invariant that
// once GT_PUTARG nodes start to appear in LIR, the call is coming
// up. To avoid breaking this invariant we move _all_ GT_PUTARG
// nodes (in particular, GC info reporting relies on this).
//
// To sum up, we end up transforming
//
// ta... = <early args>
// tb... = <late args>
// tc = callTarget
// GT_CALL tc, ta..., tb...
//
// into
//
// ta... = <early args> (without GT_PUTARG_* nodes)
// tb = callTarget
// GT_CALL CORINFO_HELP_VALIDATE_INDIRECT_CALL, tb
// tc = GT_PHYSREG REG_VALIDATE_INDIRECT_CALL_ADDR (preserved by helper)
// td = <moved GT_PUTARG_* nodes>
// GT_CALL tb, ta..., td..
//
GenTree* regNode = PhysReg(REG_VALIDATE_INDIRECT_CALL_ADDR, TYP_I_IMPL);
LIR::Use useOfTar;
bool gotUse = BlockRange().TryGetUse(callTarget, &useOfTar);
assert(gotUse);
useOfTar.ReplaceWith(regNode);
GenTree* targetPlaceholder = comp->gtNewZeroConNode(callTarget->TypeGet());
// Add the call to the validator. Use a placeholder for the target while we
// morph, sequence and lower, to avoid redoing that for the actual target.
GenTreeCall::Use* args = comp->gtNewCallArgs(targetPlaceholder);
GenTreeCall* validate = comp->gtNewHelperCallNode(CORINFO_HELP_VALIDATE_INDIRECT_CALL, TYP_VOID, args);
comp->fgMorphTree(validate);
LIR::Range validateRange = LIR::SeqTree(comp, validate);
GenTree* validateFirst = validateRange.FirstNode();
GenTree* validateLast = validateRange.LastNode();
// Insert the validator with the call target before the late args.
BlockRange().InsertBefore(call, std::move(validateRange));
// Swap out the target
gotUse = BlockRange().TryGetUse(targetPlaceholder, &useOfTar);
assert(gotUse);
useOfTar.ReplaceWith(callTarget);
targetPlaceholder->SetUnusedValue();
LowerRange(validateFirst, validateLast);
// Insert the PHYSREG node that we must load right after validation.
BlockRange().InsertAfter(validate, regNode);
LowerNode(regNode);
// Finally move all GT_PUTARG_* nodes
for (GenTreeCall::Use& use : call->Args())
{
GenTree* node = use.GetNode();
if (!node->IsValue())
{
// Non-value nodes in early args are setup nodes for late args.
continue;
}
assert(node->OperIsPutArg() || node->OperIsFieldList());
MoveCFGCallArg(call, node);
}
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* node = use.GetNode();
assert(node->OperIsPutArg() || node->OperIsFieldList());
MoveCFGCallArg(call, node);
}
break;
}
case CFGCallKind::Dispatch:
{
#ifdef REG_DISPATCH_INDIRECT_CALL_ADDR
// Now insert the call target as an extra argument.
//
// First append the early placeholder arg
GenTreeCall::Use** earlySlot = &call->gtCallArgs;
unsigned int index = call->gtCallThisArg != nullptr ? 1 : 0;
while (*earlySlot != nullptr)
{
earlySlot = &(*earlySlot)->NextRef();
index++;
}
assert(index == call->fgArgInfo->ArgCount());
GenTree* placeHolder = comp->gtNewArgPlaceHolderNode(callTarget->TypeGet(), NO_CLASS_HANDLE);
placeHolder->gtFlags |= GTF_LATE_ARG;
*earlySlot = comp->gtNewCallArgs(placeHolder);
// Append the late actual arg
GenTreeCall::Use** lateSlot = &call->gtCallLateArgs;
unsigned int lateIndex = 0;
while (*lateSlot != nullptr)
{
lateSlot = &(*lateSlot)->NextRef();
lateIndex++;
}
*lateSlot = comp->gtNewCallArgs(callTarget);
// Add an entry into the arg info
regNumber regNum = REG_DISPATCH_INDIRECT_CALL_ADDR;
unsigned numRegs = 1;
unsigned byteSize = TARGET_POINTER_SIZE;
unsigned byteAlignment = TARGET_POINTER_SIZE;
bool isStruct = false;
bool isFloatHfa = false;
bool isVararg = false;
fgArgTabEntry* entry =
call->fgArgInfo->AddRegArg(index, placeHolder, *earlySlot, regNum, numRegs, byteSize, byteAlignment,
isStruct, isFloatHfa,
isVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0)
UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr));
entry->lateUse = *lateSlot;
entry->SetLateArgInx(lateIndex);
// Lower the newly added args now that call is updated
LowerArg(call, &(*earlySlot)->NodeRef());
LowerArg(call, &(*lateSlot)->NodeRef());
// Finally update the call to be a helper call
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_DISPATCH_INDIRECT_CALL);
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
// Now relower the call target
call->gtControlExpr = LowerDirectCall(call);
if (call->gtControlExpr != nullptr)
{
LIR::Range dispatchControlExprRange = LIR::SeqTree(comp, call->gtControlExpr);
ContainCheckRange(dispatchControlExprRange);
BlockRange().InsertBefore(call, std::move(dispatchControlExprRange));
}
#else
assert(!"Unexpected CFGCallKind::Dispatch for platform without dispatcher");
#endif
break;
}
default:
unreached();
}
}
//------------------------------------------------------------------------
// IsInvariantInRange: Check if a node is invariant in the specified range. In
// other words, can 'node' be moved to right before 'endExclusive' without its
// computation changing values?
//
// Arguments:
// node - The node.
// endExclusive - The exclusive end of the range to check invariance for.
//
bool Lowering::IsInvariantInRange(GenTree* node, GenTree* endExclusive)
{
assert(node->Precedes(endExclusive));
if (node->IsInvariant())
{
return true;
}
if (!node->IsValue())
{
return false;
}
if (node->OperIsLocal())
{
GenTreeLclVarCommon* lcl = node->AsLclVarCommon();
LclVarDsc* desc = comp->lvaGetDesc(lcl);
if (desc->IsAddressExposed())
{
return false;
}
// Currently, non-address exposed locals have the property that their
// use occurs at the user, so no further interference check is
// necessary.
return true;
}
return false;
}
//------------------------------------------------------------------------
// MoveCFGCallArg: Given a call that will be CFG transformed using the
// validate+call scheme, and an argument GT_PUTARG_* or GT_FIELD_LIST node,
// move that node right before the call.
//
// Arguments:
// call - The call that is being CFG transformed
// node - The argument node
//
// Remarks:
// We can always move the GT_PUTARG_* node further ahead as the side-effects
// of these nodes are handled by LSRA. However, the operands of these nodes
// are not always safe to move further ahead; for invariant operands, we
// move them ahead as well to shorten the lifetime of these values.
//
void Lowering::MoveCFGCallArg(GenTreeCall* call, GenTree* node)
{
assert(node->OperIsPutArg() || node->OperIsFieldList());
if (node->OperIsFieldList())
{
JITDUMP("Node is a GT_FIELD_LIST; moving all operands\n");
for (GenTreeFieldList::Use& operand : node->AsFieldList()->Uses())
{
assert(operand.GetNode()->OperIsPutArg());
MoveCFGCallArg(call, operand.GetNode());
}
}
else
{
GenTree* operand = node->AsOp()->gtGetOp1();
JITDUMP("Checking if we can move operand of GT_PUTARG_* node:\n");
DISPTREE(operand);
if (((operand->gtFlags & GTF_ALL_EFFECT) == 0) && IsInvariantInRange(operand, call))
{
JITDUMP("...yes, moving to after validator call\n");
BlockRange().Remove(operand);
BlockRange().InsertBefore(call, operand);
}
else
{
JITDUMP("...no, operand has side effects or is not invariant\n");
}
}
JITDUMP("Moving\n");
DISPTREE(node);
JITDUMP("\n");
BlockRange().Remove(node);
BlockRange().InsertBefore(call, node);
}
#ifndef TARGET_64BIT
//------------------------------------------------------------------------
// Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node.
//
// Arguments:
// cmp - the compare node
//
// Return Value:
// The next node to lower.
//
// Notes:
// This is done during lowering because DecomposeLongs handles only nodes
// that produce TYP_LONG values. Compare nodes may consume TYP_LONG values
// but produce TYP_INT values.
//
GenTree* Lowering::DecomposeLongCompare(GenTree* cmp)
{
assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG);
GenTree* src1 = cmp->gtGetOp1();
GenTree* src2 = cmp->gtGetOp2();
assert(src1->OperIs(GT_LONG));
assert(src2->OperIs(GT_LONG));
GenTree* loSrc1 = src1->gtGetOp1();
GenTree* hiSrc1 = src1->gtGetOp2();
GenTree* loSrc2 = src2->gtGetOp1();
GenTree* hiSrc2 = src2->gtGetOp2();
BlockRange().Remove(src1);
BlockRange().Remove(src2);
genTreeOps condition = cmp->OperGet();
GenTree* loCmp;
GenTree* hiCmp;
if (cmp->OperIs(GT_EQ, GT_NE))
{
//
// Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can
// be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we
// don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction.
//
// XOR is used rather than SUB because it is commutative and thus allows swapping the operands when
// the first happens to be a constant. Usually only the second compare operand is a constant but it's
// still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast
// then hiSrc1 would be 0.
//
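// Illustrative sketch (hypothetical operands): for a compare of a long
// local against 0 on a 32-bit target,
//   (x EQ 0)
// lowers to
//   t = OR(x.lo, x.hi)
//   SETCC/JCC (EQ)
// because both constant halves are 0 and no XOR nodes are needed.
//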
if (loSrc1->OperIs(GT_CNS_INT))
{
std::swap(loSrc1, loSrc2);
}
if (loSrc2->IsIntegralConst(0))
{
BlockRange().Remove(loSrc2);
loCmp = loSrc1;
}
else
{
loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2);
BlockRange().InsertBefore(cmp, loCmp);
ContainCheckBinary(loCmp->AsOp());
}
if (hiSrc1->OperIs(GT_CNS_INT))
{
std::swap(hiSrc1, hiSrc2);
}
if (hiSrc2->IsIntegralConst(0))
{
BlockRange().Remove(hiSrc2);
hiCmp = hiSrc1;
}
else
{
hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2);
BlockRange().InsertBefore(cmp, hiCmp);
ContainCheckBinary(hiCmp->AsOp());
}
hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp);
BlockRange().InsertBefore(cmp, hiCmp);
ContainCheckBinary(hiCmp->AsOp());
}
else
{
assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT));
//
// If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0).
// If the compare is unsigned we can still use SUB but we need to check the Carry flag,
// not the actual result. In both cases we can simply check the appropriate condition flags
// and ignore the actual result:
// SUB_LO loSrc1, loSrc2
// SUB_HI hiSrc1, hiSrc2
// SETCC|JCC (signed|unsigned LT|GE)
// If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can
// be turned into a CMP because the first SUB would have set carry to 0. This effectively
// transforms a long compare against 0 into an int compare of the high part against 0.
//
// (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value
// is greater than 0 is not so easy. We need to turn this into a positive/negative check
// like the one we get for LT|GE compares, this can be achieved by swapping the compare:
// (x LE|GT y) becomes (y GE|LT x)
//
// Having to swap operands is problematic when the second operand is a constant. The constant
// moves to the first operand where it cannot be contained and thus needs a register. This can
// be avoided by changing the constant such that LE|GT becomes LT|GE:
// (x LE|GT 41) becomes (x LT|GE 42)
//
if (cmp->OperIs(GT_LE, GT_GT))
{
bool mustSwap = true;
if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT))
{
uint32_t loValue = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue());
uint32_t hiValue = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue());
uint64_t value = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32);
uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX;
if (value != maxValue)
{
value++;
loValue = value & UINT32_MAX;
hiValue = (value >> 32) & UINT32_MAX;
loSrc2->AsIntCon()->SetIconValue(loValue);
hiSrc2->AsIntCon()->SetIconValue(hiValue);
condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE;
mustSwap = false;
}
}
if (mustSwap)
{
std::swap(loSrc1, loSrc2);
std::swap(hiSrc1, hiSrc2);
condition = GenTree::SwapRelop(condition);
}
}
assert((condition == GT_LT) || (condition == GT_GE));
if (loSrc2->IsIntegralConst(0))
{
BlockRange().Remove(loSrc2);
// Very conservative dead code removal... but it helps.
if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD))
{
BlockRange().Remove(loSrc1);
}
else
{
loSrc1->SetUnusedValue();
}
hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2);
BlockRange().InsertBefore(cmp, hiCmp);
ContainCheckCompare(hiCmp->AsOp());
}
else
{
loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2);
hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2);
BlockRange().InsertBefore(cmp, loCmp, hiCmp);
ContainCheckCompare(loCmp->AsOp());
ContainCheckBinary(hiCmp->AsOp());
//
// Try to move the first SUB_HI operands right in front of it, this allows using
// a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do
// this only for locals as they won't change condition flags. Note that we could
// move constants (except 0 which generates XOR reg, reg) but it's extremely rare
// to have a constant as the first operand.
//
if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD) && IsInvariantInRange(hiSrc1, hiCmp))
{
BlockRange().Remove(hiSrc1);
BlockRange().InsertBefore(hiCmp, hiSrc1);
}
}
}
hiCmp->gtFlags |= GTF_SET_FLAGS;
if (hiCmp->IsValue())
{
hiCmp->SetUnusedValue();
}
LIR::Use cmpUse;
if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
{
BlockRange().Remove(cmp);
GenTree* jcc = cmpUse.User();
jcc->AsOp()->gtOp1 = nullptr;
jcc->ChangeOper(GT_JCC);
jcc->gtFlags |= GTF_USE_FLAGS;
jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
}
else
{
cmp->AsOp()->gtOp1 = nullptr;
cmp->AsOp()->gtOp2 = nullptr;
cmp->ChangeOper(GT_SETCC);
cmp->gtFlags |= GTF_USE_FLAGS;
cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
}
return cmp->gtNext;
}
#endif // !TARGET_64BIT
//------------------------------------------------------------------------
// Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations.
//
// Arguments:
// cmp - the compare node
//
// Return Value:
// The original compare node if lowering should proceed as usual or the next node
// to lower if the compare node was changed in such a way that lowering is no
// longer needed.
//
// Notes:
// - Narrow operands to enable memory operand containment (XARCH specific).
// - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could
// be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added).
// - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific)
// - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the
// condition flags appropriately (XARCH/ARM64 specific but could be extended
// to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS).
//
GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
{
assert(cmp->gtGetOp2()->IsIntegralConst());
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
ssize_t op2Value = op2->IconValue();
#ifdef TARGET_XARCH
var_types op1Type = op1->TypeGet();
if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && FitsIn(op1Type, op2Value))
{
//
// If op1's type is small then try to narrow op2 so it has the same type as op1.
// Small types are usually used by memory loads and if both compare operands have
// the same type then the memory load can be contained. In certain situations
// (e.g "cmp ubyte, 200") we also get a smaller instruction encoding.
//
op2->gtType = op1Type;
}
else
#endif
if (op1->OperIs(GT_CAST) && !op1->gtOverflow())
{
GenTreeCast* cast = op1->AsCast();
var_types castToType = cast->CastToType();
GenTree* castOp = cast->gtGetOp1();
if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value))
{
//
// Since we're going to remove the cast we need to be able to narrow the cast operand
// to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR).
// Some opers just can't be narrowed (e.g. DIV, MUL) while others could be narrowed but
// doing so would produce incorrect results (e.g. RSZ, RSH).
//
// The below list of handled opers is conservative but enough to handle the most common
// situations. In particular this includes CALL; sometimes the JIT unnecessarily widens
// the result of bool returning calls.
//
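// Illustrative example (XARCH, hypothetical operands):
//   EQ(CAST<ubyte>(AND(x, y)), 0)
// becomes
//   EQ(AND<ubyte>(x, y), 0)
// by narrowing the AND and the constant to the cast type and removing the cast.
//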
bool removeCast =
#ifdef TARGET_ARM64
(op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) &&
#endif
(castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIs(GT_OR, GT_XOR, GT_AND)
#ifdef TARGET_XARCH
|| IsContainableMemoryOp(castOp)
#endif
);
if (removeCast)
{
assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
#ifdef TARGET_ARM64
bool cmpEq = cmp->OperIs(GT_EQ);
cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE);
op2->SetIconValue(0xff);
op2->gtType = castOp->gtType;
#else
castOp->gtType = castToType;
op2->gtType = castToType;
#endif
// If we have any contained memory ops on castOp, they must now not be contained.
if (castOp->OperIs(GT_OR, GT_XOR, GT_AND))
{
GenTree* op1 = castOp->gtGetOp1();
if ((op1 != nullptr) && !op1->IsCnsIntOrI())
{
op1->ClearContained();
}
GenTree* op2 = castOp->gtGetOp2();
if ((op2 != nullptr) && !op2->IsCnsIntOrI())
{
op2->ClearContained();
}
}
cmp->AsOp()->gtOp1 = castOp;
BlockRange().Remove(cast);
}
}
}
else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE))
{
//
// Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible.
//
GenTree* andOp1 = op1->gtGetOp1();
GenTree* andOp2 = op1->gtGetOp2();
if (op2Value != 0)
{
// Optimizes (X & 1) == 1 to (X & 1)
// The compiler requires jumps to have relop operands, so we do not fold that case.
LIR::Use cmpUse;
if ((op2Value == 1) && cmp->OperIs(GT_EQ))
{
if (andOp2->IsIntegralConst(1) && (genActualType(op1) == cmp->TypeGet()) &&
BlockRange().TryGetUse(cmp, &cmpUse) && !cmpUse.User()->OperIs(GT_JTRUE))
{
GenTree* next = cmp->gtNext;
cmpUse.ReplaceWith(op1);
BlockRange().Remove(cmp->gtGetOp2());
BlockRange().Remove(cmp);
return next;
}
}
//
// If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
// into ((x AND mask) NE|EQ 0) when mask is a single bit.
//
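// Illustrative example (hypothetical mask): ((x AND 8) EQ 8) becomes
// ((x AND 8) NE 0), which the code below then turns into TEST_NE(x, 8),
// i.e. a single "test" instruction followed by SETCC/JCC on XARCH.
//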
if (isPow2<target_size_t>(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
{
op2Value = 0;
op2->SetIconValue(0);
cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
}
}
if (op2Value == 0)
{
BlockRange().Remove(op1);
BlockRange().Remove(op2);
cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE);
cmp->AsOp()->gtOp1 = andOp1;
cmp->AsOp()->gtOp2 = andOp2;
// We will re-evaluate containment below
andOp1->ClearContained();
andOp2->ClearContained();
#ifdef TARGET_XARCH
if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
{
//
// For "test" we only care about the bits that are set in the second operand (mask).
// If the mask fits in a small type then we can narrow both operands to generate a "test"
// instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid
// a widening load in some cases.
//
// For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches
// the behavior of a previous implementation and avoids adding more cases where we generate
// 16 bit instructions that require a length changing prefix (0x66). These suffer from
// significant decoder stalls on Intel CPUs.
//
// We could also do this for 64 bit masks that fit into 32 bit but it doesn't help.
// In such cases morph narrows down the existing GT_AND by inserting a cast between it and
// the memory operand so we'd need to add more code to recognize and eliminate that cast.
//
size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue());
if (FitsIn<UINT8>(mask))
{
andOp1->gtType = TYP_UBYTE;
andOp2->gtType = TYP_UBYTE;
}
else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2)
{
andOp1->gtType = TYP_USHORT;
andOp2->gtType = TYP_USHORT;
}
}
#endif
}
}
if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE))
{
#ifdef TARGET_XARCH
//
// Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT
// results in smaller and faster code. It also doesn't have special register
// requirements, unlike LSH that requires the shift count to be in ECX.
// Note that BT has the same behavior as LSH when the bit index exceeds the
// operand bit size - it uses (bit_index MOD bit_size).
//
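// Illustrative example (hypothetical operands): TEST_NE(x, LSH(1, y))
// becomes BT(x, y) consumed by a JCC/SETCC on the carry flag, roughly
//   bt x, y
//   jc/setc ...
// instead of materializing the shifted mask in a register.
//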
GenTree* lsh = cmp->gtGetOp2();
LIR::Use cmpUse;
if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) &&
BlockRange().TryGetUse(cmp, &cmpUse))
{
GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC;
cmp->SetOper(GT_BT);
cmp->gtType = TYP_VOID;
cmp->gtFlags |= GTF_SET_FLAGS;
cmp->AsOp()->gtOp2 = lsh->gtGetOp2();
cmp->gtGetOp2()->ClearContained();
BlockRange().Remove(lsh->gtGetOp1());
BlockRange().Remove(lsh);
GenTreeCC* cc;
if (cmpUse.User()->OperIs(GT_JTRUE))
{
cmpUse.User()->ChangeOper(GT_JCC);
cc = cmpUse.User()->AsCC();
cc->gtCondition = condition;
}
else
{
cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
BlockRange().InsertAfter(cmp, cc);
cmpUse.ReplaceWith(cc);
}
cc->gtFlags |= GTF_USE_FLAGS;
return cmp->gtNext;
}
#endif // TARGET_XARCH
}
else if (cmp->OperIs(GT_EQ, GT_NE))
{
GenTree* op1 = cmp->gtGetOp1();
GenTree* op2 = cmp->gtGetOp2();
// TODO-CQ: right now the below peep is inexpensive and gets the benefit in most
// cases because in the majority of cases op1, op2 and cmp would be in that order in
// execution. In general we should be able to check that all the nodes that come
// after op1 do not modify the flags so that it is safe to avoid generating a
// test instruction.
if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) &&
#ifdef TARGET_XARCH
(op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG)
#ifdef FEATURE_HW_INTRINSICS
|| (op1->OperIs(GT_HWINTRINSIC) &&
emitter::DoesWriteZeroFlag(HWIntrinsicInfo::lookupIns(op1->AsHWIntrinsic())))
#endif // FEATURE_HW_INTRINSICS
)
#else // TARGET_ARM64
op1->OperIs(GT_AND, GT_ADD, GT_SUB)
#endif
)
{
op1->gtFlags |= GTF_SET_FLAGS;
op1->SetUnusedValue();
BlockRange().Remove(op2);
GenTree* next = cmp->gtNext;
GenTree* cc;
genTreeOps ccOp;
LIR::Use cmpUse;
// Fast check for the common case - relop used by a JTRUE that immediately follows it.
if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp))
{
cc = next;
ccOp = GT_JCC;
next = nullptr;
BlockRange().Remove(cmp);
}
else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
{
cc = cmpUse.User();
ccOp = GT_JCC;
next = nullptr;
BlockRange().Remove(cmp);
}
else // The relop is not used by a JTRUE or it is not used at all.
{
// Transform the relop node into a SETCC. If it's not used we could remove
// it completely but that means doing more work to handle a rare case.
cc = cmp;
ccOp = GT_SETCC;
}
GenCondition condition = GenCondition::FromIntegralRelop(cmp);
cc->ChangeOper(ccOp);
cc->AsCC()->gtCondition = condition;
cc->gtFlags |= GTF_USE_FLAGS;
return next;
}
}
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
return cmp;
}
//------------------------------------------------------------------------
// Lowering::LowerCompare: Lowers a compare node.
//
// Arguments:
// cmp - the compare node
//
// Return Value:
// The next node to lower.
//
GenTree* Lowering::LowerCompare(GenTree* cmp)
{
#ifndef TARGET_64BIT
if (cmp->gtGetOp1()->TypeGet() == TYP_LONG)
{
return DecomposeLongCompare(cmp);
}
#endif
if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts())
{
GenTree* next = OptimizeConstCompare(cmp);
// If OptimizeConstCompare returns the compare node as "next" then we need to continue lowering.
if (next != cmp)
{
return next;
}
}
#ifdef TARGET_XARCH
if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet())
{
if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet()))
{
//
// If both operands have the same type then codegen will use the common operand type to
// determine the instruction type. For small types this would result in performing a
// signed comparison of two small unsigned values without zero extending them to TYP_INT
// which is incorrect. Note that making the comparison unsigned doesn't imply that codegen
// has to generate a small comparison, it can still correctly generate a TYP_INT comparison.
//
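// Illustrative example (hypothetical values): two TYP_UBYTE loads holding
// 200 and 100 would compare as -56 and 100 under a signed byte compare,
// giving the wrong result for relational compares; GTF_UNSIGNED avoids that.
//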
cmp->gtFlags |= GTF_UNSIGNED;
}
}
#endif // TARGET_XARCH
ContainCheckCompare(cmp->AsOp());
return cmp->gtNext;
}
//------------------------------------------------------------------------
// Lowering::LowerJTrue: Lowers a JTRUE node.
//
// Arguments:
// jtrue - the JTRUE node
//
// Return Value:
// The next node to lower (usually nullptr).
//
// Notes:
// On ARM64 this may remove the JTRUE node and transform its associated
// relop into a JCMP node.
//
GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
{
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
GenTree* relop = jtrue->gtGetOp1();
GenTree* relopOp2 = relop->AsOp()->gtGetOp2();
if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI())
{
bool useJCMP = false;
GenTreeFlags flags = GTF_EMPTY;
#if defined(TARGET_LOONGARCH64)
if (relop->OperIs(GT_EQ, GT_NE))
{
// Codegen will use beq or bne.
flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY;
useJCMP = true;
}
#else // TARGET_ARM64
if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0))
{
// Codegen will use cbz or cbnz, which do not affect the flag register
flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY;
useJCMP = true;
}
else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue()))
{
// Codegen will use tbz or tbnz, which do not affect the flag register
flags = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : GTF_EMPTY);
useJCMP = true;
}
#endif // TARGET_ARM64
if (useJCMP)
{
relop->SetOper(GT_JCMP);
relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ);
relop->gtFlags |= flags;
relop->gtType = TYP_VOID;
relopOp2->SetContained();
BlockRange().Remove(jtrue);
assert(relop->gtNext == nullptr);
return nullptr;
}
}
#endif // TARGET_ARM64 || TARGET_LOONGARCH64
ContainCheckJTrue(jtrue);
assert(jtrue->gtNext == nullptr);
return nullptr;
}
//----------------------------------------------------------------------------------------------
// LowerNodeCC: Lowers a node that produces a boolean value by setting the condition flags.
//
// Arguments:
// node - The node to lower
// condition - The condition code of the generated SETCC/JCC node
//
// Return Value:
// A SETCC/JCC node or nullptr if `node` is not used.
//
// Notes:
// This simply replaces `node`'s use with an appropriate SETCC/JCC node,
// `node` is not actually changed, except by having its GTF_SET_FLAGS set.
// It's the caller's responsibility to change `node` such that it only
// sets the condition flags, without producing a boolean value.
//
GenTreeCC* Lowering::LowerNodeCC(GenTree* node, GenCondition condition)
{
// Skip over a chain of EQ/NE(x, 0) relops. This may be present either
// because `node` is not a relop and so it cannot be used directly by a
// JTRUE, or because the frontend failed to remove an EQ/NE(x, 0) that's
// used as logical negation.
//
// Usually there's only one such relop but there's little difference
// between removing one or all so we may as well remove them all.
//
// We can't allow any other nodes between `node` and its user because we
// have no way of knowing if those nodes change flags or not. So we're looking
// to skip over a sequence of appropriately connected zero and EQ/NE nodes.
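// Illustrative shape of such a chain (hypothetical):
//   node ; 0 ; NE(node, 0) ; 0 ; EQ(NE, 0) ; JTRUE(EQ)
// where each EQ in the chain reverses the condition used by the final JCC/SETCC.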
// The x in EQ/NE(x, 0)
GenTree* relop = node;
// The first node of the relop sequence
GenTree* first = node->gtNext;
// The node following the relop sequence
GenTree* next = first;
while ((next != nullptr) && next->IsIntegralConst(0) && (next->gtNext != nullptr) &&
next->gtNext->OperIs(GT_EQ, GT_NE) && (next->gtNext->AsOp()->gtGetOp1() == relop) &&
(next->gtNext->AsOp()->gtGetOp2() == next))
{
relop = next->gtNext;
next = relop->gtNext;
if (relop->OperIs(GT_EQ))
{
condition = GenCondition::Reverse(condition);
}
}
GenTreeCC* cc = nullptr;
// Next may be null if `node` is not used. In that case we don't need to generate a SETCC node.
if (next != nullptr)
{
if (next->OperIs(GT_JTRUE))
{
// If the instruction immediately following 'relop', i.e. 'next' is a conditional branch,
// it should always have 'relop' as its 'op1'. If it doesn't, then we have improperly
// constructed IL (the setting of a condition code should always immediately precede its
// use, since the JIT doesn't track dataflow for condition codes). Still, if it happens
// it's not our problem, it simply means that `node` is not used and can be removed.
if (next->AsUnOp()->gtGetOp1() == relop)
{
assert(relop->OperIsCompare());
next->ChangeOper(GT_JCC);
cc = next->AsCC();
cc->gtCondition = condition;
}
}
else
{
// If the node is used by something other than a JTRUE then we need to insert a
// SETCC node to materialize the boolean value.
LIR::Use use;
if (BlockRange().TryGetUse(relop, &use))
{
cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
BlockRange().InsertAfter(node, cc);
use.ReplaceWith(cc);
}
}
}
if (cc != nullptr)
{
node->gtFlags |= GTF_SET_FLAGS;
cc->gtFlags |= GTF_USE_FLAGS;
}
// Remove the chain of EQ/NE(x, 0) relop nodes, if any. Note that if a SETCC was
// inserted after `node`, `first` still points to the node that was initially
// after `node`.
if (relop != node)
{
BlockRange().Remove(first, relop);
}
return cc;
}
// Lower "jmp <method>" tail call to insert PInvoke method epilog if required.
void Lowering::LowerJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
JITDUMP("lowering GT_JMP\n");
DISPNODE(jmp);
JITDUMP("============");
// If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
// a method returns.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp));
}
}
// Lower GT_RETURN node to insert PInvoke method epilog if required.
void Lowering::LowerRet(GenTreeUnOp* ret)
{
assert(ret->OperGet() == GT_RETURN);
JITDUMP("lowering GT_RETURN\n");
DISPNODE(ret);
JITDUMP("============");
GenTree* retVal = ret->gtGetOp1();
// There are two kinds of retyping:
// - A simple bitcast can be inserted when we're returning a floating type as
//   an integral type or vice-versa;
// - If we're returning a struct as a primitive type, we change the type of
//   'retVal' in 'LowerRetSingleRegStructLclVar()'.
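// Illustrative example (hypothetical): a method returning TYP_FLOAT whose
// retVal tree is TYP_INT gets a BITCAST<float>(retVal) inserted below.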
bool needBitcast =
(ret->TypeGet() != TYP_VOID) && (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1()));
bool doPrimitiveBitcast = false;
if (needBitcast)
{
doPrimitiveBitcast = (!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
}
if (doPrimitiveBitcast)
{
// Add a simple bitcast when both types are not structs.
// If one type is a struct it will be handled below.
#if defined(DEBUG)
assert(!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
#endif
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
else if (ret->TypeGet() != TYP_VOID)
{
#if FEATURE_MULTIREG_RET
if (retVal->OperIs(GT_LCL_VAR) && varTypeIsStruct(retVal))
{
ReturnTypeDesc retTypeDesc;
LclVarDsc* varDsc = nullptr;
varDsc = comp->lvaGetDesc(retVal->AsLclVar());
retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd(), comp->info.compCallConv);
if (retTypeDesc.GetReturnRegCount() > 1)
{
CheckMultiRegLclVar(retVal->AsLclVar(), &retTypeDesc);
}
}
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
if (varTypeIsStruct(ret->TypeGet()) != varTypeIsStruct(retVal->TypeGet()))
{
if (varTypeIsStruct(ret->TypeGet()))
{
assert(comp->info.compRetNativeType != TYP_STRUCT);
var_types retActualType = genActualType(comp->info.compRetNativeType);
var_types retValActualType = genActualType(retVal->TypeGet());
bool constStructInit = retVal->IsConstInitVal();
bool implicitCastFromSameOrBiggerSize = (genTypeSize(retActualType) <= genTypeSize(retValActualType));
// This could happen if we have retyped op1 as a primitive type during struct promotion,
// check `retypedFieldsMap` for details.
bool actualTypesMatch = (retActualType == retValActualType);
assert(actualTypesMatch || constStructInit || implicitCastFromSameOrBiggerSize);
}
}
#endif // DEBUG
if (varTypeIsStruct(ret))
{
LowerRetStruct(ret);
}
else if (!ret->TypeIs(TYP_VOID) && varTypeIsStruct(retVal))
{
// Return struct as a primitive using Unsafe cast.
assert(retVal->OperIs(GT_LCL_VAR));
LowerRetSingleRegStructLclVar(ret);
}
}
// A method doing PInvokes has exactly one return block unless it has tail calls.
if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB))
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
}
ContainCheckRet(ret);
}
//----------------------------------------------------------------------------------------------
// LowerStoreLocCommon: platform independent part of local var or field store lowering.
//
// Arguments:
// lclStore - The store lcl node to lower.
//
void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
{
assert(lclStore->OperIs(GT_STORE_LCL_FLD, GT_STORE_LCL_VAR));
JITDUMP("lowering store lcl var/field (before):\n");
DISPTREERANGE(BlockRange(), lclStore);
JITDUMP("\n");
GenTree* src = lclStore->gtGetOp1();
LclVarDsc* varDsc = comp->lvaGetDesc(lclStore);
const bool srcIsMultiReg = src->IsMultiRegNode();
const bool dstIsMultiReg = lclStore->IsMultiRegLclVar();
if (!dstIsMultiReg && varTypeIsStruct(varDsc))
{
// TODO-Cleanup: we want to check `varDsc->lvRegStruct` as the last condition instead of `!varDsc->lvPromoted`,
// but we do not set it for `CSE` vars so it is currently failing.
assert(varDsc->CanBeReplacedWithItsField(comp) || varDsc->lvDoNotEnregister || !varDsc->lvPromoted);
if (varDsc->CanBeReplacedWithItsField(comp))
{
assert(varDsc->lvFieldCnt == 1);
unsigned fldNum = varDsc->lvFieldLclStart;
LclVarDsc* fldDsc = comp->lvaGetDesc(fldNum);
JITDUMP("Replacing an independently promoted local var V%02u with its only field V%02u for the store "
"from a call [%06u]\n",
lclStore->GetLclNum(), fldNum, comp->dspTreeID(lclStore));
lclStore->SetLclNum(fldNum);
lclStore->ChangeType(fldDsc->TypeGet());
varDsc = fldDsc;
}
}
if (srcIsMultiReg || dstIsMultiReg)
{
const ReturnTypeDesc* retTypeDesc = nullptr;
if (src->OperIs(GT_CALL))
{
retTypeDesc = src->AsCall()->GetReturnTypeDesc();
}
CheckMultiRegLclVar(lclStore->AsLclVar(), retTypeDesc);
}
const var_types lclRegType = varDsc->GetRegisterType(lclStore);
if ((lclStore->TypeGet() == TYP_STRUCT) && !srcIsMultiReg)
{
bool convertToStoreObj;
if (src->OperGet() == GT_CALL)
{
GenTreeCall* call = src->AsCall();
const ClassLayout* layout = varDsc->GetLayout();
#ifdef DEBUG
const unsigned slotCount = layout->GetSlotCount();
#if defined(TARGET_XARCH) && !defined(UNIX_AMD64_ABI)
// Windows x64 doesn't have multireg returns,
// x86 uses it only for long return type, not for structs.
assert(slotCount == 1);
assert(lclRegType != TYP_UNDEF);
#else // !TARGET_XARCH || UNIX_AMD64_ABI
if (!varDsc->lvIsHfa())
{
if (slotCount > 1)
{
assert(call->HasMultiRegRetVal());
}
else
{
unsigned size = layout->GetSize();
assert((size <= 8) || (size == 16));
bool isPowerOf2 = (((size - 1) & size) == 0);
bool isTypeDefined = (lclRegType != TYP_UNDEF);
assert(isPowerOf2 == isTypeDefined);
}
}
#endif // !TARGET_XARCH || UNIX_AMD64_ABI
#endif // DEBUG
#if !defined(WINDOWS_AMD64_ABI)
if (!call->HasMultiRegRetVal() && (lclRegType == TYP_UNDEF))
{
// If we have a single return register,
// but we can't retype it as a primitive type, we must spill it.
GenTreeLclVar* spilledCall = SpillStructCallResult(call);
lclStore->gtOp1 = spilledCall;
src = lclStore->gtOp1;
JITDUMP("lowering store lcl var/field has to spill call src.\n");
LowerStoreLocCommon(lclStore);
return;
}
#endif // !WINDOWS_AMD64_ABI
convertToStoreObj = false;
}
else if (!varDsc->IsEnregisterableType())
{
convertToStoreObj = true;
}
else if (src->OperIs(GT_CNS_INT))
{
assert(src->IsIntegralConst(0) && "expected an INIT_VAL for non-zero init.");
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(lclRegType))
{
CorInfoType simdBaseJitType = comp->getBaseJitTypeOfSIMDLocal(lclStore);
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
// Lie about the type if we don't know/have it.
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
GenTreeSIMD* simdTree =
comp->gtNewSIMDNode(lclRegType, src, SIMDIntrinsicInit, simdBaseJitType, varDsc->lvExactSize);
BlockRange().InsertAfter(src, simdTree);
LowerSIMD(simdTree);
src = simdTree;
lclStore->gtOp1 = src;
convertToStoreObj = false;
}
else
#endif // FEATURE_SIMD
{
convertToStoreObj = false;
}
}
else if (!src->OperIs(GT_LCL_VAR))
{
convertToStoreObj = true;
}
else
{
assert(src->OperIs(GT_LCL_VAR));
convertToStoreObj = false;
}
if (convertToStoreObj)
{
const unsigned lclNum = lclStore->GetLclNum();
GenTreeLclVar* addr = comp->gtNewLclVarAddrNode(lclNum, TYP_BYREF);
comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOp));
addr->gtFlags |= GTF_VAR_DEF;
assert(!addr->IsPartialLclFld(comp));
addr->gtFlags |= GTF_DONT_CSE;
// Create the assignment node.
lclStore->ChangeOper(GT_STORE_OBJ);
GenTreeBlk* objStore = lclStore->AsObj();
// Only the GTF_LATE_ARG flag (if present) is preserved.
objStore->gtFlags &= GTF_LATE_ARG;
objStore->gtFlags |= GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP;
#ifndef JIT32_GCENCODER
objStore->gtBlkOpGcUnsafe = false;
#endif
objStore->gtBlkOpKind = GenTreeObj::BlkOpKindInvalid;
objStore->SetLayout(varDsc->GetLayout());
objStore->SetAddr(addr);
objStore->SetData(src);
BlockRange().InsertBefore(objStore, addr);
LowerBlockStoreCommon(objStore);
return;
}
}
// src and dst can be in registers, check if we need a bitcast.
if (!src->TypeIs(TYP_STRUCT) && (varTypeUsesFloatReg(lclRegType) != varTypeUsesFloatReg(src)))
{
assert(!srcIsMultiReg && !dstIsMultiReg);
assert(lclStore->OperIsLocalStore());
assert(lclRegType != TYP_UNDEF);
GenTree* bitcast = comp->gtNewBitCastNode(lclRegType, src);
lclStore->gtOp1 = bitcast;
src = lclStore->gtGetOp1();
BlockRange().InsertBefore(lclStore, bitcast);
ContainCheckBitCast(bitcast);
}
LowerStoreLoc(lclStore);
JITDUMP("lowering store lcl var/field (after):\n");
DISPTREERANGE(BlockRange(), lclStore);
JITDUMP("\n");
}
//----------------------------------------------------------------------------------------------
// LowerRetStruct: Lowers a struct return node.
//
// Arguments:
// node - The return node to lower.
//
void Lowering::LowerRetStruct(GenTreeUnOp* ret)
{
#ifdef TARGET_ARM64
if (GlobalJitOptions::compFeatureHfa)
{
if (varTypeIsSIMD(ret))
{
if (comp->info.compRetNativeType == TYP_STRUCT)
{
assert(varTypeIsSIMD(ret->gtGetOp1()));
assert(comp->compMethodReturnsMultiRegRegTypeAlternate());
ret->ChangeType(comp->info.compRetNativeType);
}
else
{
assert(comp->info.compRetNativeType == ret->TypeGet());
GenTree* retVal = ret->gtGetOp1();
if (retVal->TypeGet() != ret->TypeGet())
{
assert(retVal->OperIs(GT_LCL_VAR));
LowerRetSingleRegStructLclVar(ret);
}
return;
}
}
}
#endif // TARGET_ARM64
if (comp->compMethodReturnsMultiRegRegTypeAlternate())
{
return;
}
assert(ret->OperIs(GT_RETURN));
assert(varTypeIsStruct(ret));
GenTree* retVal = ret->gtGetOp1();
// Note: small types are returned as INT.
var_types nativeReturnType = genActualType(comp->info.compRetNativeType);
ret->ChangeType(nativeReturnType);
switch (retVal->OperGet())
{
case GT_CALL:
assert(retVal->TypeIs(nativeReturnType)); // Type should be changed during call processing.
break;
case GT_CNS_INT:
// When we promote LCL_VAR single fields into the return
// we could have all types of constants here.
if (varTypeUsesFloatReg(nativeReturnType))
{
// Do not expect `initblock` for SIMD* types,
// only 'initobj'.
assert(retVal->AsIntCon()->IconValue() == 0);
retVal->BashToConst(0.0, TYP_FLOAT);
}
break;
case GT_OBJ:
retVal->ChangeOper(GT_IND);
FALLTHROUGH;
case GT_IND:
retVal->ChangeType(nativeReturnType);
LowerIndir(retVal->AsIndir());
break;
case GT_LCL_VAR:
LowerRetSingleRegStructLclVar(ret);
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif // FEATURE_HW_INTRINSICS
{
assert(!retVal->TypeIs(TYP_STRUCT));
if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal))
{
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
}
break;
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
case GT_LCL_FLD:
{
#ifdef DEBUG
LclVarDsc* varDsc = comp->lvaGetDesc(retVal->AsLclFld());
assert(varDsc->lvDoNotEnregister);
#endif
retVal->ChangeType(nativeReturnType);
}
break;
default:
assert(varTypeIsEnregisterable(retVal));
if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal))
{
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
break;
}
}
//----------------------------------------------------------------------------------------------
// LowerRetSingleRegStructLclVar: Lowers a return node with a struct lclVar as a source.
//
// Arguments:
// node - The return node to lower.
//
// Notes:
// - the function is only for LclVars that are returned in one register;
// - if LclVar is allocated in memory then read it as return type;
// - if LclVar can be enregistered read it as register type and add a bitcast if necessary;
//
void Lowering::LowerRetSingleRegStructLclVar(GenTreeUnOp* ret)
{
assert(!comp->compMethodReturnsMultiRegRegTypeAlternate());
assert(ret->OperIs(GT_RETURN));
GenTreeLclVarCommon* lclVar = ret->gtGetOp1()->AsLclVar();
assert(lclVar->OperIs(GT_LCL_VAR));
unsigned lclNum = lclVar->GetLclNum();
LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
// TODO-1stClassStructs: We can no longer independently promote
// or enregister this struct, since it is referenced as a whole.
comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOpRet));
}
if (varDsc->lvDoNotEnregister)
{
lclVar->ChangeOper(GT_LCL_FLD);
lclVar->AsLclFld()->SetLclOffs(0);
// We are returning as a primitive type and the lcl is of struct type.
assert(comp->info.compRetNativeType != TYP_STRUCT);
assert((genTypeSize(comp->info.compRetNativeType) == genTypeSize(ret)) ||
(varTypeIsIntegral(ret) && varTypeIsIntegral(comp->info.compRetNativeType) &&
(genTypeSize(comp->info.compRetNativeType) <= genTypeSize(ret))));
// If the actual return type requires normalization, then make sure we
// do so by using the correct small type for the GT_LCL_FLD. It would
// be conservative to check just compRetNativeType for this since small
// structs are normalized to primitive types when they are returned in
// registers, so we would normalize for them as well.
if (varTypeIsSmall(comp->info.compRetType))
{
assert(genTypeSize(comp->info.compRetNativeType) == genTypeSize(comp->info.compRetType));
lclVar->ChangeType(comp->info.compRetType);
}
else
{
// Otherwise we don't mind that we leave the upper bits undefined.
lclVar->ChangeType(ret->TypeGet());
}
}
else
{
const var_types lclVarType = varDsc->GetRegisterType(lclVar);
assert(lclVarType != TYP_UNDEF);
const var_types actualType = genActualType(lclVarType);
lclVar->ChangeType(actualType);
if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(lclVarType))
{
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), ret->gtOp1);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
}
}
//----------------------------------------------------------------------------------------------
// LowerCallStruct: Lowers a call node that returns a struct.
//
// Arguments:
// call - The call node to lower.
//
// Notes:
// - this handles only single-register returns;
// - it transforms the call's user for `GT_STOREIND`.
//
void Lowering::LowerCallStruct(GenTreeCall* call)
{
assert(varTypeIsStruct(call));
if (call->HasMultiRegRetVal())
{
return;
}
if (GlobalJitOptions::compFeatureHfa)
{
if (comp->IsHfa(call))
{
#if defined(TARGET_ARM64)
assert(comp->GetHfaCount(call) == 1);
#elif defined(TARGET_ARM)
// ARM returns double in 2 float registers, but
// `call->HasMultiRegRetVal()` counts double registers.
assert(comp->GetHfaCount(call) <= 2);
#else // !TARGET_ARM64 && !TARGET_ARM
NYI("Unknown architecture");
#endif // !TARGET_ARM64 && !TARGET_ARM
var_types hfaType = comp->GetHfaType(call);
if (call->TypeIs(hfaType))
{
return;
}
}
}
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN);
var_types origType = call->TypeGet();
call->gtType = genActualType(returnType);
LIR::Use callUse;
if (BlockRange().TryGetUse(call, &callUse))
{
GenTree* user = callUse.User();
switch (user->OperGet())
{
case GT_RETURN:
case GT_STORE_LCL_VAR:
case GT_STORE_BLK:
case GT_STORE_OBJ:
// Leave as is, the user will handle it.
assert(user->TypeIs(origType) || varTypeIsSIMD(user->TypeGet()));
break;
#ifdef FEATURE_SIMD
case GT_STORE_LCL_FLD:
// If the call type was ever updated (in importer) to TYP_SIMD*, it should match the user type.
// If not, the user type should match the struct's returnType.
assert((varTypeIsSIMD(user) && user->TypeIs(origType)) || (returnType == user->TypeGet()));
break;
#endif // FEATURE_SIMD
case GT_STOREIND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(user))
{
user->ChangeType(returnType);
break;
}
#endif // FEATURE_SIMD
// importer has a separate mechanism to retype calls to helpers,
// keep it for now.
assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_CORERT_ABI)));
assert(call->IsHelperCall());
assert(returnType == user->TypeGet());
break;
default:
unreached();
}
}
}
//----------------------------------------------------------------------------------------------
// LowerStoreSingleRegCallStruct: Lowers a store block where the source is a struct typed call.
//
// Arguments:
// store - The store node to lower.
//
// Notes:
// - the function is only for calls that return one register;
// - it spills the call's result if it can be retyped as a primitive type;
//
void Lowering::LowerStoreSingleRegCallStruct(GenTreeBlk* store)
{
assert(store->Data()->IsCall());
GenTreeCall* call = store->Data()->AsCall();
assert(!call->HasMultiRegRetVal());
const ClassLayout* layout = store->GetLayout();
var_types regType = layout->GetRegisterType();
if (regType != TYP_UNDEF)
{
#if defined(TARGET_LOONGARCH64)
if (varTypeIsFloating(call->TypeGet()))
{
regType = call->TypeGet();
}
#endif
store->ChangeType(regType);
store->SetOper(GT_STOREIND);
LowerStoreIndirCommon(store->AsStoreInd());
return;
}
else
{
#if defined(WINDOWS_AMD64_ABI)
// All ABIs except Windows x64 support passing 3 byte structs in registers.
// Other 64 bit ABIs support passing 5, 6 and 7 byte structs.
unreached();
#else // !WINDOWS_AMD64_ABI
if (store->OperIs(GT_STORE_OBJ))
{
store->SetOper(GT_STORE_BLK);
}
store->gtBlkOpKind = GenTreeObj::BlkOpKindUnroll;
GenTreeLclVar* spilledCall = SpillStructCallResult(call);
store->SetData(spilledCall);
LowerBlockStoreCommon(store);
#endif // WINDOWS_AMD64_ABI
}
}
#if !defined(WINDOWS_AMD64_ABI)
//----------------------------------------------------------------------------------------------
// SpillStructCallResult: Spill call result to memory.
//
// Arguments:
// call - call with a 3, 5, 6 or 7 byte return size that has to be spilled to memory.
//
// Return Value:
// load of the spilled variable.
//
GenTreeLclVar* Lowering::SpillStructCallResult(GenTreeCall* call) const
{
// TODO-1stClassStructs: we can support this in codegen for `GT_STORE_BLK` without new temps.
const unsigned spillNum = comp->lvaGrabTemp(true DEBUGARG("Return value temp for an odd struct return size"));
comp->lvaSetVarDoNotEnregister(spillNum DEBUGARG(DoNotEnregisterReason::LocalField));
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
comp->lvaSetStruct(spillNum, retClsHnd, false);
GenTreeLclFld* spill = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, call->gtType, spillNum, 0);
spill->gtOp1 = call;
spill->gtFlags |= GTF_VAR_DEF;
BlockRange().InsertAfter(call, spill);
ContainCheckStoreLoc(spill);
GenTreeLclVar* loadCallResult = comp->gtNewLclvNode(spillNum, TYP_STRUCT)->AsLclVar();
BlockRange().InsertAfter(spill, loadCallResult);
return loadCallResult;
}
#endif // !WINDOWS_AMD64_ABI
GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);
// Non-virtual direct/indirect calls: Work out if the address of the
// call is known at JIT time. If not, it is either an indirect call
// or the address must be accessed via a single/double indirection.
void* addr;
InfoAccessType accessType;
CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd);
#ifdef FEATURE_READYTORUN
if (call->gtEntryPoint.addr != nullptr)
{
accessType = call->gtEntryPoint.accessType;
addr = call->gtEntryPoint.addr;
}
else
#endif
if (call->gtCallType == CT_HELPER)
{
noway_assert(helperNum != CORINFO_HELP_UNDEF);
// the convention on getHelperFtn seems to be (it's not documented)
// that it returns an address or if it returns null, pAddr is set to
// another address, which requires an indirection
void* pAddr;
addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr);
if (addr != nullptr)
{
assert(pAddr == nullptr);
accessType = IAT_VALUE;
}
else
{
accessType = IAT_PVALUE;
addr = pAddr;
}
}
else
{
noway_assert(helperNum == CORINFO_HELP_UNDEF);
CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
if (call->IsSameThis())
{
aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
}
if (!call->NeedsNullCheck())
{
aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
}
CORINFO_CONST_LOOKUP addrInfo;
comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags);
accessType = addrInfo.accessType;
addr = addrInfo.addr;
}
GenTree* result = nullptr;
switch (accessType)
{
case IAT_VALUE:
// Non-virtual direct call to known address.
// For JIT helper based tailcall (only used on x86) the target
// address is passed as an arg to the helper so we want a node for
// it.
if (!IsCallTargetInRange(addr) || call->IsTailCallViaJitHelper())
{
result = AddrGen(addr);
}
else
{
// a direct call within range of hardware relative call instruction
// stash the address for codegen
call->gtDirectCallAddress = addr;
}
break;
case IAT_PVALUE:
{
// If we are using an indirection cell for a direct call then apply
// an optimization that loads the call target directly from the
// indirection cell, instead of duplicating the tree.
bool hasIndirectionCell = call->GetIndirectionCellArgKind() != NonStandardArgKind::None;
if (!hasIndirectionCell)
{
// Non-virtual direct calls to addresses accessed by
// a single indirection.
GenTree* cellAddr = AddrGen(addr);
#ifdef DEBUG
cellAddr->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
GenTree* indir = Ind(cellAddr);
result = indir;
}
break;
}
case IAT_PPVALUE:
// Non-virtual direct calls to addresses accessed by
// a double indirection.
//
// Expanding an IAT_PPVALUE here will lose the opportunity
// to Hoist/CSE the first indirection as it is an invariant load
//
assert(!"IAT_PPVALUE case in LowerDirectCall");
noway_assert(helperNum == CORINFO_HELP_UNDEF);
result = AddrGen(addr);
// Double-indirection. Load the address into a register
// and call indirectly through the register
//
result = Ind(Ind(result));
break;
case IAT_RELPVALUE:
{
// Non-virtual direct calls to addresses accessed by
// a single relative indirection.
GenTree* cellAddr = AddrGen(addr);
GenTree* indir = Ind(cellAddr);
result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr));
break;
}
default:
noway_assert(!"Bad accessType");
break;
}
return result;
}
GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) &
(CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL));
GenTree* thisArgNode;
if (call->IsTailCallViaJitHelper())
{
const unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum);
thisArgNode = thisArgTabEntry->GetNode();
}
else
{
thisArgNode = comp->gtGetThisArg(call);
}
assert(thisArgNode != nullptr);
assert(thisArgNode->gtOper == GT_PUTARG_REG);
GenTree* thisExpr = thisArgNode->AsOp()->gtOp1;
// We're going to use the 'this' expression multiple times, so make a local to copy it.
GenTree* base;
if (thisExpr->OperIs(GT_LCL_VAR))
{
base = comp->gtNewLclvNode(thisExpr->AsLclVar()->GetLclNum(), thisExpr->TypeGet());
}
else if (thisExpr->OperIs(GT_LCL_FLD))
{
base = comp->gtNewLclFldNode(thisExpr->AsLclFld()->GetLclNum(), thisExpr->TypeGet(),
thisExpr->AsLclFld()->GetLclOffs());
}
else
{
unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call"));
base = comp->gtNewLclvNode(delegateInvokeTmp, thisExpr->TypeGet());
LIR::Use thisExprUse(BlockRange(), &thisArgNode->AsOp()->gtOp1, thisArgNode);
ReplaceWithLclVar(thisExprUse, delegateInvokeTmp);
thisExpr = thisExprUse.Def(); // it's changed; reload it.
}
// replace original expression feeding into thisPtr with
// [originalThis + offsetOfDelegateInstance]
GenTree* newThisAddr = new (comp, GT_LEA)
GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance);
GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr);
BlockRange().InsertAfter(thisExpr, newThisAddr, newThis);
thisArgNode->AsOp()->gtOp1 = newThis;
ContainCheckIndir(newThis->AsIndir());
// the control target is
// [originalThis + firstTgtOffs]
unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget;
GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs);
GenTree* callTarget = Ind(result);
// don't need to sequence and insert this tree, caller will do it
return callTarget;
}
GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call)
{
#ifdef TARGET_X86
if (call->gtCallCookie != nullptr)
{
NYI_X86("Morphing indirect non-virtual call with non-standard args");
}
#endif
// Indirect cookie calls get transformed by fgMorphArgs into indirect calls with non-standard args.
// Hence we should never see this type of call in lower.
noway_assert(call->gtCallCookie == nullptr);
return nullptr;
}
//------------------------------------------------------------------------
// CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke
// epilogs to invoke a GC under a condition. The return trap checks some global
// location (the runtime tells us where that is and how many indirections to make),
// then, based on the result, conditionally calls a GC helper. We use a special node
// for this because at this time (late in the compilation phases), introducing flow
// is tedious/difficult.
//
// This is used for PInvoke inlining.
//
// Return Value:
// Code tree to perform the action.
//
GenTree* Lowering::CreateReturnTrapSeq()
{
// The GT_RETURNTRAP node expands to this:
// if (g_TrapReturningThreads)
// {
// RareDisablePreemptiveGC();
// }
// The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'.
void* pAddrOfCaptureThreadGlobal = nullptr;
int32_t* addrOfCaptureThreadGlobal =
comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal);
GenTree* testTree;
if (addrOfCaptureThreadGlobal != nullptr)
{
testTree = AddrGen(addrOfCaptureThreadGlobal);
}
else
{
testTree = Ind(AddrGen(pAddrOfCaptureThreadGlobal));
}
return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, Ind(testTree, TYP_INT));
}
//------------------------------------------------------------------------
// SetGCState: Create a tree that stores the given constant (0 or 1) into the
// thread's GC state field.
//
// This is used for PInvoke inlining.
//
// Arguments:
// state - constant (0 or 1) to store into the thread's GC state field.
//
// Return Value:
// Code tree to perform the action.
//
GenTree* Lowering::SetGCState(int state)
{
// Thread.offsetOfGcState = 0/1
assert(state == 0 || state == 1);
const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state);
GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState);
GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode);
return storeGcState;
}
//------------------------------------------------------------------------
// CreateFrameLinkUpdate: Create a tree that either links or unlinks the
// locally-allocated InlinedCallFrame from the Frame list.
//
// This is used for PInvoke inlining.
//
// Arguments:
// action - whether to link (push) or unlink (pop) the Frame
//
// Return Value:
// Code tree to perform the action.
//
GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
{
const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
// Thread->m_pFrame
GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame);
GenTree* data = nullptr;
if (action == PushFrame)
{
// Thread->m_pFrame = &inlinedCallFrame;
data = new (comp, GT_LCL_FLD_ADDR)
GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
}
else
{
assert(action == PopFrame);
// Thread->m_pFrame = inlinedCallFrame.m_pNext;
data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
}
GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data);
return storeInd;
}
//------------------------------------------------------------------------
// InsertPInvokeMethodProlog: Create the code that runs at the start of
// every method that has PInvoke calls.
//
// Initialize the TCB local and the InlinedCallFrame object. Then link ("push")
// the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame
// is defined in vm/frames.h. See also vm/jitinterface.cpp for more information.
// The offsets of these fields are returned by the VM in a call to ICorStaticInfo::getEEInfo().
//
// The (current) layout is as follows:
//
// 64-bit 32-bit CORINFO_EE_INFO
// offset offset field name offset when set
// -----------------------------------------------------------------------------------------
// +00h +00h GS cookie offsetOfGSCookie
// +08h +04h vptr for class InlinedCallFrame offsetOfFrameVptr method prolog
// +10h +08h m_Next offsetOfFrameLink method prolog
// +18h +0Ch m_Datum offsetOfCallTarget call site
// +20h n/a m_StubSecretArg not set by JIT
// +28h +10h m_pCallSiteSP offsetOfCallSiteSP x86: call site, and zeroed in method
// prolog;
// non-x86: method prolog (SP remains
// constant in function, after prolog: no
// localloc and PInvoke in same function)
// +30h +14h m_pCallerReturnAddress offsetOfReturnAddress call site
// +38h +18h m_pCalleeSavedFP offsetOfCalleeSavedFP not set by JIT
// +1Ch m_pThread
// +20h m_pSPAfterProlog offsetOfSPAfterProlog arm only
// +20/24h JIT retval spill area (int) before call_gc ???
// +24/28h JIT retval spill area (long) before call_gc ???
// +28/2Ch Saved value of EBP method prolog ???
//
// Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points
// to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before*
// the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location,
// and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie.
//
// Return Value:
// none
//
void Lowering::InsertPInvokeMethodProlog()
{
noway_assert(comp->info.compUnmanagedCallCountWithGCTransition);
noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
if (comp->opts.ShouldUsePInvokeHelpers())
{
return;
}
JITDUMP("======= Inserting PInvoke method prolog\n");
// The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog.
assert(comp->fgFirstBBisScratch());
LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB);
const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
// First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr
#if defined(DEBUG)
const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar);
assert(inlinedPInvokeDsc->IsAddressExposed());
#endif // DEBUG
GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR)
GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
// Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list:
// TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg);
// for x86, don't pass the secretArg.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86) || defined(TARGET_ARM)
GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr);
#else
GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
#endif
GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList);
// some sanity checks on the frame list root vardsc
const unsigned lclNum = comp->info.compLvFrameListRoot;
const LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
noway_assert(!varDsc->lvIsParam);
noway_assert(varDsc->lvType == TYP_I_IMPL);
GenTree* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, lclNum);
store->AsOp()->gtOp1 = call;
store->gtFlags |= GTF_VAR_DEF;
GenTree* const insertionPoint = firstBlockRange.FirstNonCatchArgNode();
comp->fgMorphTree(store);
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store));
DISPTREERANGE(firstBlockRange, store);
#if !defined(TARGET_X86) && !defined(TARGET_ARM)
// For x86, this step is done at the call site (due to stack pointer not being static in the function).
// For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
// --------------------------------------------------------
// InlinedCallFrame.m_pCallSiteSP = @RSP;
GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD)
GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
storeSP->gtOp1 = PhysReg(REG_SPBASE);
storeSP->gtFlags |= GTF_VAR_DEF;
assert(inlinedPInvokeDsc->lvDoNotEnregister);
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP));
DISPTREERANGE(firstBlockRange, storeSP);
#endif // !defined(TARGET_X86) && !defined(TARGET_ARM)
#if !defined(TARGET_ARM)
// For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
// --------------------------------------------------------
// InlinedCallFrame.m_pCalleeSavedEBP = @RBP;
GenTreeLclFld* storeFP =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfCalleeSavedFP);
assert(inlinedPInvokeDsc->lvDoNotEnregister);
storeFP->gtOp1 = PhysReg(REG_FPBASE);
storeFP->gtFlags |= GTF_VAR_DEF;
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP));
DISPTREERANGE(firstBlockRange, storeFP);
#endif // !defined(TARGET_ARM)
// --------------------------------------------------------
// On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto
// the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
{
// Push a frame - if we are NOT in an IL stub, this is done right before the call
// The init routine sets InlinedCallFrame's m_pNext, so we just set the thread's top-of-stack
GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
ContainCheckStoreIndir(frameUpd->AsStoreInd());
DISPTREERANGE(firstBlockRange, frameUpd);
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method
// that has PInvoke inlines. This needs to be inserted any place you can exit the
// function: returns, tailcalls and jmps.
//
// Arguments:
// returnBB - basic block from which a method can return
// lastExpr - GenTree of the last top level stmnt of returnBB (debug only arg)
//
// Return Value:
// Code tree to perform the action.
//
void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr))
{
assert(returnBB != nullptr);
assert(comp->info.compUnmanagedCallCountWithGCTransition);
if (comp->opts.ShouldUsePInvokeHelpers())
{
return;
}
JITDUMP("======= Inserting PInvoke method epilog\n");
// Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls.
assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) ||
returnBB->endsWithTailCallOrJmp(comp));
LIR::Range& returnBlockRange = LIR::AsRange(returnBB);
GenTree* insertionPoint = returnBlockRange.LastNode();
assert(insertionPoint == lastExpr);
// Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution
// order so that it is guaranteed that there will be no further PInvokes after that point in the method.
//
// Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN. After inserting PME, execution order would be
// Op1, PME, GT_RETURN
//
// Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be
// arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL
// After inserting PME execution order would be:
// arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL
//
// Example3: GT_JMP. After inserting PME execution order would be: PME, GT_JMP
// That is after PME, args for GT_JMP call will be setup.
// Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do
// this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
#endif // TARGET_64BIT
{
GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame);
returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
ContainCheckStoreIndir(frameUpd->AsStoreInd());
}
}
//------------------------------------------------------------------------
// InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code.
// It does all the necessary call-site setup of the InlinedCallFrame.
//
// Arguments:
// call - the call for which we are inserting the PInvoke prolog.
//
// Return Value:
// None.
//
void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
{
JITDUMP("======= Inserting PInvoke call prolog\n");
GenTree* insertBefore = call;
if (call->gtCallType == CT_INDIRECT)
{
bool isClosed;
insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
assert(isClosed);
}
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
gtCallTypes callType = (gtCallTypes)call->gtCallType;
noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
if (comp->opts.ShouldUsePInvokeHelpers())
{
// First argument is the address of the frame variable.
GenTree* frameAddr =
new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);
#if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
// On x86 targets, PInvoke calls need the size of the stack args in InlinedCallFrame.m_Datum.
// This is because the callee pops stack arguments, and we need to keep track of this during stack
// walking
const unsigned numStkArgBytes = call->fgArgInfo->GetNextSlotByteOffset();
GenTree* stackBytes = comp->gtNewIconNode(numStkArgBytes, TYP_INT);
GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr, stackBytes);
#else
GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr);
#endif
// Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN
GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, args);
comp->fgMorphTree(helperCall);
BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall));
LowerNode(helperCall); // helper call is inserted before current node and should be lowered here.
return;
}
// Emit the following sequence:
//
// InlinedCallFrame.callTarget = methodHandle // stored in m_Datum
// InlinedCallFrame.m_pCallSiteSP = SP // x86 only
// InlinedCallFrame.m_pCallerReturnAddress = return address
// GT_START_PREEMPTGC
// Thread.gcState = 0
// (non-stub) - update top Frame on TCB // 64-bit targets only
// ----------------------------------------------------------------------------------
// Setup InlinedCallFrame.callSiteTarget (which is how the JIT refers to it).
// The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings.
GenTree* src = nullptr;
if (callType == CT_INDIRECT)
{
#if !defined(TARGET_64BIT)
// On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum.
const unsigned stackByteOffset = call->fgArgInfo->GetNextSlotByteOffset();
src = comp->gtNewIconNode(stackByteOffset, TYP_INT);
#else
// On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum.
// If the stub parameter value is not needed, m_Datum will be initialized by the VM.
if (comp->info.compPublishStubParam)
{
src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL);
}
#endif // !defined(TARGET_64BIT)
}
else
{
assert(callType == CT_USER_FUNC);
void* pEmbedMethodHandle = nullptr;
CORINFO_METHOD_HANDLE embedMethodHandle =
comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle);
noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle));
if (embedMethodHandle != nullptr)
{
// InlinedCallFrame.callSiteTarget = methodHandle
src = AddrGen(embedMethodHandle);
}
else
{
// InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle
src = Ind(AddrGen(pEmbedMethodHandle));
}
}
if (src != nullptr)
{
// Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
GenTreeLclFld* store =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfCallTarget);
store->gtOp1 = src;
store->gtFlags |= GTF_VAR_DEF;
InsertTreeBeforeAndContainCheck(insertBefore, store);
}
#ifdef TARGET_X86
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallSiteSP = SP
GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD)
GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE);
storeCallSiteSP->gtFlags |= GTF_VAR_DEF;
InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP);
#endif
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call)
GenTreeLclFld* storeLab =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfReturnAddress);
storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL);
storeLab->gtFlags |= GTF_VAR_DEF;
InsertTreeBeforeAndContainCheck(insertBefore, storeLab);
// Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method
// contains PInvokes; on 64-bit targets this is necessary in non-stubs.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
{
// Set the TCB's frame to be the one we just created.
// Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME)
// has prepended it to the linked list to maintain the stack of Frames.
//
// Stubs do this once per stub, not once per call.
GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd));
ContainCheckStoreIndir(frameUpd->AsStoreInd());
}
#endif // TARGET_64BIT
// IMPORTANT **** This instruction must be the last real instruction ****
// It changes the thread's state to Preemptive mode
// ----------------------------------------------------------------------------------
// [tcb + offsetOfGcState] = 0
GenTree* storeGCState = SetGCState(0);
BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState));
ContainCheckStoreIndir(storeGCState->AsStoreInd());
// Indicate that codegen has switched this thread to preemptive GC.
// This tree node doesn't generate any code, but impacts LSRA and gc reporting.
// This tree node is simple so doesn't require sequencing.
GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID);
BlockRange().InsertBefore(insertBefore, preemptiveGCNode);
}
//------------------------------------------------------------------------
// InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call.
//
// Arguments:
// call - the call for which we are inserting the PInvoke epilog.
//
// Return Value:
// None.
//
void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
{
JITDUMP("======= Inserting PInvoke call epilog\n");
if (comp->opts.ShouldUsePInvokeHelpers())
{
noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
// First argument is the address of the frame variable.
GenTree* frameAddr = comp->gtNewLclVarAddrNode(comp->lvaInlinedPInvokeFrameVar, TYP_BYREF);
#if defined(DEBUG)
const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar);
assert(inlinedPInvokeDsc->IsAddressExposed());
#endif // DEBUG
// Insert call to CORINFO_HELP_JIT_PINVOKE_END
GenTreeCall* helperCall =
comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewCallArgs(frameAddr));
comp->fgMorphTree(helperCall);
BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall));
ContainCheckCallOperands(helperCall);
return;
}
// gcstate = 1
GenTree* insertionPoint = call->gtNext;
GenTree* tree = SetGCState(1);
BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
ContainCheckStoreIndir(tree->AsStoreInd());
tree = CreateReturnTrapSeq();
BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
ContainCheckReturnTrap(tree->AsOp());
// Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets this
// happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
{
tree = CreateFrameLinkUpdate(PopFrame);
BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
ContainCheckStoreIndir(tree->AsStoreInd());
}
#else
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallerReturnAddress = nullptr
GenTreeLclFld* const storeCallSiteTracker =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfReturnAddress);
GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
storeCallSiteTracker->gtOp1 = constantZero;
storeCallSiteTracker->gtFlags |= GTF_VAR_DEF;
BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker);
ContainCheckStoreLoc(storeCallSiteTracker);
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call
//
// Arguments:
// call - The call to lower.
//
// Return Value:
// The lowered call tree.
//
GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
{
// PInvoke lowering varies depending on the flags passed in by the EE. By default,
// GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified,
// GC transitions are instead performed using helper calls. Examples of each case are given
// below. Note that the data structure that is used to store information about a call frame
// containing any P/Invoke calls is initialized in the method prolog (see
// InsertPInvokeMethod{Prolog,Epilog} for details).
//
// Inline transitions:
// InlinedCallFrame inlinedCallFrame;
//
// ...
//
// // Set up frame information
// inlinedCallFrame.callTarget = methodHandle; // stored in m_Datum
// inlinedCallFrame.m_pCallSiteSP = SP; // x86 only
// inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the
// call)
// Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only)
//
// // Switch the thread's GC mode to preemptive mode
// thread->m_fPreemptiveGCDisabled = 0;
//
// // Call the unmanaged method
// target();
//
// // Switch the thread's GC mode back to cooperative mode
// thread->m_fPreemptiveGCDisabled = 1;
//
// // Rendezvous with a running collection if necessary
// if (g_TrapReturningThreads)
// RareDisablePreemptiveGC();
//
// Transitions using helpers:
//
// OpaqueFrame opaqueFrame;
//
// ...
//
// // Call the JIT_PINVOKE_BEGIN helper
// JIT_PINVOKE_BEGIN(&opaqueFrame);
//
// // Call the unmanaged method
// target();
//
// // Call the JIT_PINVOKE_END helper
// JIT_PINVOKE_END(&opaqueFrame);
//
// Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target
// platform. They may be changed in the future such that they preserve all register values.
GenTree* result = nullptr;
// All code generated by this function must not contain the randomly-inserted NOPs
// that we insert to inhibit JIT spraying in partial trust scenarios.
// The PINVOKE_PROLOG op signals this to the code generator/emitter.
GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID);
BlockRange().InsertBefore(call, prolog);
bool addPInvokePrologEpilog = !call->IsSuppressGCTransition();
if (addPInvokePrologEpilog)
{
InsertPInvokeCallProlog(call);
}
if (call->gtCallType != CT_INDIRECT)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;
CORINFO_CONST_LOOKUP lookup;
comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);
void* addr = lookup.addr;
GenTree* addrTree;
switch (lookup.accessType)
{
case IAT_VALUE:
// IsCallTargetInRange always returns true on x64. It wants to use rip-based addressing
// for this call. Unfortunately, in case of pinvokes (+suppressgctransition) to external libs
// (e.g. kernel32.dll) the relative offset is unlikely to fit into int32 and we will have to
// turn fAllowRel32 off globally.
if ((call->IsSuppressGCTransition() && !comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) ||
!IsCallTargetInRange(addr))
{
result = AddrGen(addr);
}
else
{
// a direct call within range of hardware relative call instruction
// stash the address for codegen
call->gtDirectCallAddress = addr;
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
}
break;
case IAT_PVALUE:
addrTree = AddrGen(addr);
#ifdef DEBUG
addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
result = Ind(addrTree);
break;
case IAT_PPVALUE:
// ToDo: Expanding an IAT_PPVALUE here loses the opportunity
// to Hoist/CSE the first indirection as it is an invariant load
//
// This case currently occurs today when we make PInvoke calls in crossgen
//
// assert(!"IAT_PPVALUE in Lowering::LowerNonvirtPinvokeCall");
addrTree = AddrGen(addr);
#ifdef DEBUG
addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
// Double-indirection. Load the address into a register
// and call indirectly through the register
//
result = Ind(Ind(addrTree));
break;
case IAT_RELPVALUE:
unreached();
}
}
if (addPInvokePrologEpilog)
{
InsertPInvokeCallEpilog(call);
}
return result;
}
// Expand the code necessary to calculate the control target.
// Returns: the expression needed to calculate the control target
// May insert embedded statements
GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
regNumber thisPtrArgReg = comp->codeGen->genGetThisArgReg(call);
// get a reference to the thisPtr being passed
fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, 0);
assert(argEntry->GetRegNum() == thisPtrArgReg);
assert(argEntry->GetNode()->OperIs(GT_PUTARG_REG));
GenTree* thisPtr = argEntry->GetNode()->AsUnOp()->gtGetOp1();
// If what we are passing as the thisptr is not already a local, make a new local to place it in
// because we will be creating expressions based on it.
unsigned lclNum;
if (thisPtr->OperIsLocal())
{
lclNum = thisPtr->AsLclVarCommon()->GetLclNum();
}
else
{
// Split off the thisPtr and store to a temporary variable.
if (vtableCallTemp == BAD_VAR_NUM)
{
vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call"));
}
LIR::Use thisPtrUse(BlockRange(), &(argEntry->GetNode()->AsUnOp()->gtOp1), argEntry->GetNode());
ReplaceWithLclVar(thisPtrUse, vtableCallTemp);
lclNum = vtableCallTemp;
}
// Get hold of the vtable offset (note: this might be expensive)
unsigned vtabOffsOfIndirection;
unsigned vtabOffsAfterIndirection;
bool isRelative;
comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
&vtabOffsAfterIndirection, &isRelative);
// If the thisPtr is a local field, then construct a local field type node
GenTree* local;
if (thisPtr->isLclField())
{
local = new (comp, GT_LCL_FLD)
GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->GetLclOffs());
}
else
{
local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum);
}
// pointer to virtual table = [REG_CALL_THIS + offs]
GenTree* result = Ind(Offset(local, VPTR_OFFS));
// Get the appropriate vtable chunk
if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
{
if (isRelative)
{
// MethodTable offset is a relative pointer.
//
// Additional temporary variable is used to store virtual table pointer.
// Address of method is obtained by the next computations:
//
// Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
// vtable-1st-level-indirection):
// tmp = vtab
//
// Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
// result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
//
//
// If relative pointers are also in second level indirection, additional temporary is used:
// tmp1 = vtab
// tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection]
// result = tmp2 + [tmp2]
//
unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp"));
unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2"));
GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result);
GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet());
tmpTree = Offset(tmpTree, vtabOffsOfIndirection);
tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false);
GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT);
result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs);
GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1);
GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base);
LIR::Range range = LIR::SeqTree(comp, lclvNodeStore);
JITDUMP("result of obtaining pointer to virtual table:\n");
DISPRANGE(range);
BlockRange().InsertBefore(call, std::move(range));
LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2);
ContainCheckIndir(tmpTree->AsIndir());
JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n");
DISPRANGE(range2);
BlockRange().InsertAfter(lclvNodeStore, std::move(range2));
result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
result =
comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
}
else
{
// result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
result = Ind(Offset(result, vtabOffsOfIndirection));
}
}
else
{
assert(!isRelative);
}
// Load the function address
// result = [reg+vtabOffs]
if (!isRelative)
{
result = Ind(Offset(result, vtabOffsAfterIndirection));
}
return result;
}
// Lower stub dispatched virtual calls.
GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
{
assert(call->IsVirtualStub());
// An x86 JIT which uses full stub dispatch must generate only
// the following stub dispatch calls:
//
// (1) isCallRelativeIndirect:
// call dword ptr [rel32] ; FF 15 ---rel32----
// (2) isCallRelative:
// call abc ; E8 ---rel32----
// (3) isCallRegisterIndirect:
// 3-byte nop ;
// call dword ptr [eax] ; FF 10
//
// THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
// vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
GenTree* result = nullptr;
// This is code to set up an indirect call to a stub address computed
// via dictionary lookup.
if (call->gtCallType == CT_INDIRECT)
{
// The importer decided we needed a stub call via a computed
// stub dispatch address, i.e. an address which came from a dictionary lookup.
// - The dictionary lookup produces an indirected address, suitable for call
// via "call [VirtualStubParam.reg]"
//
// This combination will only be generated for shared generic code and when
// stub dispatch is active.
// fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg.
// All we have to do here is add an indirection to generate the actual call target.
GenTree* ind = Ind(call->gtCallAddr);
BlockRange().InsertAfter(call->gtCallAddr, ind);
call->gtCallAddr = ind;
ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG;
ContainCheckIndir(ind->AsIndir());
}
else
{
// Direct stub call.
// Get stub addr. This will return NULL if virtual call stubs are not active
void* stubAddr = call->gtStubCallStubAddr;
noway_assert(stubAddr != nullptr);
// If not CT_INDIRECT, then it should always be relative indir call.
// This is ensured by VM.
noway_assert(call->IsVirtualStubRelativeIndir());
// This is a direct stub call, though the stubAddr itself may still need to be
// accessed via an indirection.
GenTree* addr = AddrGen(stubAddr);
// On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as
// the target address, and we set a flag that it's a VSD call. The helper then
// handles any necessary indirection.
if (call->IsTailCallViaJitHelper())
{
result = addr;
}
else
{
bool shouldOptimizeVirtualStubCall = false;
#if defined(TARGET_ARMARCH) || defined(TARGET_AMD64)
// Skip inserting the indirection node to load the address that is already
// computed in the VSD stub arg register as a hidden parameter. Instead during the
// codegen, just load the call target from there.
shouldOptimizeVirtualStubCall = !comp->opts.IsCFGEnabled();
#endif
if (!shouldOptimizeVirtualStubCall)
{
result = Ind(addr);
}
}
}
// TODO-Cleanup: start emitting random NOPS
return result;
}
//------------------------------------------------------------------------
// Lowering::AreSourcesPossiblyModifiedLocals:
// Given two nodes which will be used in an addressing mode (base,
// index), check to see if they are lclVar reads, and if so, walk
// backwards from the use until both reads have been visited to
// determine if they are potentially modified in that range.
//
// Arguments:
// addr - the node that uses the base and index nodes
// base - the base node
// index - the index node
//
// Returns: true if either the base or index may be modified between the
// node and addr.
//
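// For example (illustrative only): if 'base' is a read of local V01, and a node between that read and
// 'addr' stores to V01, the walk below reports interference and the caller will not fold both reads
// into a single addressing mode.
//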
bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index)
{
assert(addr != nullptr);
SideEffectSet baseSideEffects;
if (base != nullptr)
{
if (base->OperIsLocalRead())
{
baseSideEffects.AddNode(comp, base);
}
else
{
base = nullptr;
}
}
SideEffectSet indexSideEffects;
if (index != nullptr)
{
if (index->OperIsLocalRead())
{
indexSideEffects.AddNode(comp, index);
}
else
{
index = nullptr;
}
}
for (GenTree* cursor = addr;; cursor = cursor->gtPrev)
{
assert(cursor != nullptr);
if (cursor == base)
{
base = nullptr;
}
if (cursor == index)
{
index = nullptr;
}
if ((base == nullptr) && (index == nullptr))
{
return false;
}
m_scratchSideEffects.Clear();
m_scratchSideEffects.AddNode(comp, cursor);
if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false))
{
return true;
}
if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false))
{
return true;
}
}
}
//------------------------------------------------------------------------
// TryCreateAddrMode: recognize trees which can be implemented using an
// addressing mode and transform them to a GT_LEA
//
// Arguments:
// addr - the use of the address we want to transform
// isContainable - true if this addressing mode can be contained
// parent - the node that consumes the given addr (most likely it's an IND)
//
// Returns:
// true if the address node was changed to a LEA, false otherwise.
//
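// Illustrative sketch of the transform (the exact decomposition is decided by genCreateAddrMode):
//
//     t1 = LCL_VAR V01 // base
//     t2 = LCL_VAR V02 // index
//     t3 = LSH t2, 3
//     t4 = ADD t1, t3
//     t5 = ADD t4, 16
//          IND t5
//
// becomes
//
//     t5 = LEA(V01 + V02 * 8 + 16)
//          IND t5
//
// with the now-unused ADD/LSH/constant nodes removed from the block's LIR range.
//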
bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent)
{
if (!addr->OperIs(GT_ADD) || addr->gtOverflow())
{
#ifdef TARGET_ARM64
if (!addr->OperIs(GT_ADDEX))
{
return false;
}
#else
return false;
#endif
}
#ifdef TARGET_ARM64
if (parent->OperIsIndir() && parent->AsIndir()->IsVolatile() && !varTypeIsGC(addr))
{
// For Arm64 we avoid using LEA for volatile INDs
// because we won't be able to use ldar/star
return false;
}
#endif
GenTree* base = nullptr;
GenTree* index = nullptr;
unsigned scale = 0;
ssize_t offset = 0;
bool rev = false;
// Find out if an addressing mode can be constructed
bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address
true, // fold
&rev, // reverse ops
&base, // base addr
&index, // index val
&scale, // scaling
&offset); // displacement
var_types targetType = parent->OperIsIndir() ? parent->TypeGet() : TYP_UNDEF;
#ifdef TARGET_ARMARCH
// The multiplier should be a "natural-scale" power of two equal to the width of the accessed type.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - cannot be optimized
// *(int*)(data + index * 2); - cannot be optimized
//
if ((scale > 0) && (genTypeSize(targetType) != scale))
{
return false;
}
if (((scale | offset) > 0) && parent->OperIsHWIntrinsic())
{
// For now we only support unscaled indices for SIMD loads
return false;
}
#endif
if (scale == 0)
{
scale = 1;
}
if (!isContainable)
{
// this is just a reg-const add
if (index == nullptr)
{
return false;
}
// this is just a reg-reg add
if ((scale == 1) && (offset == 0))
{
return false;
}
}
// make sure there are not any side effects between def of leaves and use
if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index))
{
JITDUMP("No addressing mode:\n ");
DISPNODE(addr);
return false;
}
JITDUMP("Addressing mode:\n");
JITDUMP(" Base\n ");
DISPNODE(base);
if (index != nullptr)
{
JITDUMP(" + Index * %u + %d\n ", scale, offset);
DISPNODE(index);
}
else
{
JITDUMP(" + %d\n", offset);
}
// Save the (potentially) unused operands before changing the address to LEA.
ArrayStack<GenTree*> unusedStack(comp->getAllocator(CMK_ArrayStack));
unusedStack.Push(addr->AsOp()->gtGetOp1());
unusedStack.Push(addr->AsOp()->gtGetOp2());
addr->ChangeOper(GT_LEA);
// Make sure there are no leftover side effect flags (the existing ADD we're
// changing shouldn't have any at this point, but sometimes it does).
addr->gtFlags &= ~GTF_ALL_EFFECT;
GenTreeAddrMode* addrMode = addr->AsAddrMode();
addrMode->SetBase(base);
addrMode->SetIndex(index);
addrMode->SetScale(scale);
addrMode->SetOffset(static_cast<int>(offset));
// Neither the base nor the index should now be contained.
if (base != nullptr)
{
base->ClearContained();
}
if (index != nullptr)
{
index->ClearContained();
}
// Remove all the nodes that are no longer used.
while (!unusedStack.Empty())
{
GenTree* unused = unusedStack.Pop();
// Use a loop to process some of the nodes iteratively
// instead of pushing them on the stack.
while ((unused != base) && (unused != index))
{
JITDUMP("Removing unused node:\n ");
DISPNODE(unused);
BlockRange().Remove(unused);
if (unused->OperIs(GT_ADD, GT_MUL, GT_LSH))
{
// Push the first operand and loop back to process the second one.
// This minimizes the stack depth because the second one tends to be
// a constant so it gets processed and then the first one gets popped.
unusedStack.Push(unused->AsOp()->gtGetOp1());
unused = unused->AsOp()->gtGetOp2();
}
else
{
assert(unused->OperIs(GT_CNS_INT));
break;
}
}
}
#ifdef TARGET_ARM64
if ((index != nullptr) && index->OperIs(GT_CAST) && (scale == 1) && (offset == 0) && varTypeIsByte(targetType))
{
MakeSrcContained(addrMode, index);
}
// Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store.
if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) &&
index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType)))
{
// BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT
GenTreeCast* cast = index->gtGetOp1()->AsCast();
assert(cast->isContained());
const unsigned shiftBy = (unsigned)index->gtGetOp2()->AsIntCon()->IconValue();
// 'scale' and 'offset' have to be unset since we're going to use [base + index * SXTW/UXTW scale] form
// where there is no room for additional offsets/scales on ARM64. 'shiftBy' has to match target's width.
if (cast->CastOp()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && (genTypeSize(targetType) == (1U << shiftBy)) &&
(scale == 1) && (offset == 0))
{
// TODO: Make sure that genCreateAddrMode marks such BFIZ candidates as GTF_DONT_CSE for better CQ.
MakeSrcContained(addrMode, index);
}
}
#endif
JITDUMP("New addressing mode node:\n ");
DISPNODE(addrMode);
JITDUMP("\n");
return true;
}
//------------------------------------------------------------------------
// LowerAdd: turn this add into a GT_LEA if that would be profitable
//
// Arguments:
// node - the node we care about
//
// Returns:
// nullptr if no transformation was done, or the next node in the transformed node sequence that
// needs to be lowered.
//
GenTree* Lowering::LowerAdd(GenTreeOp* node)
{
if (varTypeIsIntegralOrI(node->TypeGet()))
{
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
LIR::Use use;
// This is not the best place to do such simple arithmetic optimizations,
// but it allows us to avoid `LEA(addr, 0)` nodes, and doing this in morph
// would require more changes. Delete this code if we get an expression optimizer.
if (op2->IsIntegralConst(0))
{
JITDUMP("Lower: optimize val + 0: ");
DISPNODE(node);
JITDUMP("Replaced with: ");
DISPNODE(op1);
if (BlockRange().TryGetUse(node, &use))
{
use.ReplaceWith(op1);
}
else
{
op1->SetUnusedValue();
}
GenTree* next = node->gtNext;
BlockRange().Remove(op2);
BlockRange().Remove(node);
JITDUMP("Remove [%06u], [%06u]\n", op2->gtTreeID, node->gtTreeID);
return next;
}
#ifdef TARGET_XARCH
if (BlockRange().TryGetUse(node, &use))
{
// If this is a child of an indir, let the parent handle it.
// If there is a chain of adds, only look at the topmost one.
GenTree* parent = use.User();
if (!parent->OperIsIndir() && !parent->OperIs(GT_ADD))
{
TryCreateAddrMode(node, false, parent);
}
}
#endif // TARGET_XARCH
}
if (node->OperIs(GT_ADD))
{
ContainCheckBinary(node);
}
return nullptr;
}
//------------------------------------------------------------------------
// LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node.
//
// Arguments:
// divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered
//
// Return Value:
// Returns a boolean indicating whether the node was transformed.
//
// Notes:
// - Transform UDIV/UMOD by power of 2 into RSZ/AND
// - Transform UDIV by constant >= 2^(N-1) into GE
// - Transform UDIV/UMOD by constant >= 3 into "magic division"
//
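// Illustrative examples (standard identities, listed here only to make the transforms concrete):
// - x UDIV 8 becomes x RSZ 3, and x UMOD 8 becomes x AND 7.
// - For TYP_INT, x UDIV 0xC0000000 becomes (x GE 0xC0000000), producing 0 or 1.
// - For TYP_INT, x UDIV 10 becomes the "magic divide" (uint32)(((uint64)x * 0xCCCCCCCD) >> 35),
//   i.e. an unsigned MULHI by the magic constant followed by a right shift of 3.
//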
bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod)
{
assert(divMod->OperIs(GT_UDIV, GT_UMOD));
#if defined(USE_HELPERS_FOR_INT_DIV)
if (!varTypeIsIntegral(divMod->TypeGet()))
{
assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls");
}
assert(varTypeIsFloating(divMod->TypeGet()));
#endif // USE_HELPERS_FOR_INT_DIV
#if defined(TARGET_ARM64)
assert(divMod->OperGet() != GT_UMOD);
#endif // TARGET_ARM64
GenTree* dividend = divMod->gtGetOp1();
GenTree* divisor = divMod->gtGetOp2();
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (!divisor->IsCnsIntOrI())
{
return false;
}
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
const var_types type = divMod->TypeGet();
assert((type == TYP_INT) || (type == TYP_I_IMPL));
size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue());
if (type == TYP_INT)
{
// Clear up the upper 32 bits of the value, they may be set to 1 because constants
// are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
divisorValue &= UINT32_MAX;
}
if (divisorValue == 0)
{
return false;
}
const bool isDiv = divMod->OperIs(GT_UDIV);
if (isPow2(divisorValue))
{
genTreeOps newOper;
if (isDiv)
{
newOper = GT_RSZ;
divisorValue = genLog2(divisorValue);
}
else
{
newOper = GT_AND;
divisorValue -= 1;
}
divMod->SetOper(newOper);
divisor->AsIntCon()->SetIconValue(divisorValue);
ContainCheckNode(divMod);
return true;
}
if (isDiv)
{
// If the divisor is greater than or equal to 2^(N - 1) then the result is 1
// iff the dividend is greater than or equal to the divisor.
if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) ||
((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2))))
{
divMod->SetOper(GT_GE);
divMod->gtFlags |= GTF_UNSIGNED;
ContainCheckNode(divMod);
return true;
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
if (!comp->opts.MinOpts() && (divisorValue >= 3))
{
size_t magic;
bool increment;
int preShift;
int postShift;
bool simpleMul = false;
unsigned bits = type == TYP_INT ? 32 : 64;
// if the dividend operand is AND or RSZ with a constant then the number of input bits can be reduced
if (dividend->OperIs(GT_AND) && dividend->gtGetOp2()->IsCnsIntOrI())
{
size_t maskCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
if (maskCns != 0)
{
unsigned maskBits = 1;
while (maskCns >>= 1)
maskBits++;
if (maskBits < bits)
bits = maskBits;
}
}
else if (dividend->OperIs(GT_RSZ) && dividend->gtGetOp2()->IsCnsIntOrI())
{
size_t shiftCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
if (shiftCns < bits)
{
bits -= static_cast<unsigned>(shiftCns);
}
}
if (type == TYP_INT)
{
magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &increment, &preShift,
&postShift, bits);
#ifdef TARGET_64BIT
// avoid inc_saturate/multiple shifts by widening to 32x64 MULHI
if (increment || (preShift
#ifdef TARGET_XARCH
// IMUL reg,reg,imm32 can't be used if magic<0 because of sign-extension
&& static_cast<int32_t>(magic) < 0
#endif
))
{
magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
&postShift, bits);
}
// otherwise just widen to regular multiplication
else
{
postShift += 32;
simpleMul = true;
}
#endif
}
else
{
#ifdef TARGET_64BIT
magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
&postShift, bits);
#else
unreached();
#endif
}
assert(divMod->MarkedDivideByConstOptimized());
const bool requiresDividendMultiuse = !isDiv;
const weight_t curBBWeight = m_block->getBBWeight(comp);
if (requiresDividendMultiuse)
{
LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod);
dividend = ReplaceWithLclVar(dividendUse);
}
GenTree* firstNode = nullptr;
GenTree* adjustedDividend = dividend;
#if defined(TARGET_ARM64)
// On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one.
bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul;
#else
CLANG_FORMAT_COMMENT_ANCHOR;
bool widenToNativeIntForMul = (type != TYP_I_IMPL);
#endif
// If "increment" flag is returned by GetUnsignedMagic we need to do Saturating Increment first
if (increment)
{
adjustedDividend = comp->gtNewOperNode(GT_INC_SATURATE, type, adjustedDividend);
BlockRange().InsertBefore(divMod, adjustedDividend);
firstNode = adjustedDividend;
assert(!preShift);
}
// if "preShift" is required, then do a right shift before
else if (preShift)
{
GenTree* preShiftBy = comp->gtNewIconNode(preShift, TYP_INT);
adjustedDividend = comp->gtNewOperNode(GT_RSZ, type, adjustedDividend, preShiftBy);
BlockRange().InsertBefore(divMod, preShiftBy, adjustedDividend);
firstNode = preShiftBy;
}
else if (widenToNativeIntForMul)
{
adjustedDividend = comp->gtNewCastNode(TYP_I_IMPL, adjustedDividend, true, TYP_I_IMPL);
BlockRange().InsertBefore(divMod, adjustedDividend);
firstNode = adjustedDividend;
}
#ifdef TARGET_XARCH
// force input transformation to RAX because the following MULHI will kill RDX:RAX anyway and LSRA often causes
// redundant copies otherwise
if (firstNode && !simpleMul)
{
adjustedDividend->SetRegNum(REG_RAX);
}
#endif
if (widenToNativeIntForMul)
{
divisor->gtType = TYP_I_IMPL;
}
divisor->AsIntCon()->SetIconValue(magic);
if (isDiv && !postShift && (type == TYP_I_IMPL))
{
divMod->SetOper(GT_MULHI);
divMod->gtOp1 = adjustedDividend;
divMod->SetUnsigned();
}
else
{
#ifdef TARGET_ARM64
// 64-bit MUL is more expensive than UMULL on ARM64.
genTreeOps mulOper = simpleMul ? GT_MUL_LONG : GT_MULHI;
#else
// 64-bit IMUL is less expensive than MUL eax:edx on x64.
genTreeOps mulOper = simpleMul ? GT_MUL : GT_MULHI;
#endif
// Insert a new multiplication node before the existing GT_UDIV/GT_UMOD node.
// The existing node will later be transformed into a GT_RSZ/GT_SUB that
// computes the final result. This way we don't need to find and change the use
// of the existing node.
GenTree* mulhi = comp->gtNewOperNode(mulOper, TYP_I_IMPL, adjustedDividend, divisor);
mulhi->SetUnsigned();
BlockRange().InsertBefore(divMod, mulhi);
if (firstNode == nullptr)
{
firstNode = mulhi;
}
if (postShift)
{
GenTree* shiftBy = comp->gtNewIconNode(postShift, TYP_INT);
BlockRange().InsertBefore(divMod, shiftBy);
if (isDiv && (type == TYP_I_IMPL))
{
divMod->SetOper(GT_RSZ);
divMod->gtOp1 = mulhi;
divMod->gtOp2 = shiftBy;
}
else
{
mulhi = comp->gtNewOperNode(GT_RSZ, TYP_I_IMPL, mulhi, shiftBy);
BlockRange().InsertBefore(divMod, mulhi);
}
}
if (!isDiv)
{
// dividend UMOD divisor = dividend SUB (div MUL divisor)
GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
GenTree* mul = comp->gtNewOperNode(GT_MUL, type, mulhi, divisor);
dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
divMod->SetOper(GT_SUB);
divMod->gtOp1 = dividend;
divMod->gtOp2 = mul;
BlockRange().InsertBefore(divMod, divisor, mul, dividend);
}
else if (type != TYP_I_IMPL)
{
#ifdef TARGET_ARMARCH
divMod->SetOper(GT_CAST);
divMod->SetUnsigned();
divMod->AsCast()->gtCastType = TYP_INT;
#else
divMod->SetOper(GT_BITCAST);
#endif
divMod->gtOp1 = mulhi;
divMod->gtOp2 = nullptr;
}
}
if (firstNode != nullptr)
{
ContainCheckRange(firstNode, divMod);
}
return true;
}
#endif
return false;
}
// LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a power of 2
// const divisor into equivalent but faster sequences.
//
// Arguments:
// node - pointer to the DIV or MOD node
//
// Returns:
// nullptr if no transformation is done, or the next node in the transformed node sequence that
// needs to be lowered.
//
GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node)
{
assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
GenTree* divMod = node;
GenTree* dividend = divMod->gtGetOp1();
GenTree* divisor = divMod->gtGetOp2();
const var_types type = divMod->TypeGet();
assert((type == TYP_INT) || (type == TYP_LONG));
#if defined(USE_HELPERS_FOR_INT_DIV)
assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls");
#endif // USE_HELPERS_FOR_INT_DIV
#if defined(TARGET_ARM64)
assert(node->OperGet() != GT_MOD);
#endif // TARGET_ARM64
if (!divisor->IsCnsIntOrI())
{
return nullptr; // no transformations to make
}
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return nullptr;
}
ssize_t divisorValue = divisor->AsIntCon()->IconValue();
if (divisorValue == -1 || divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
// x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is
// the minimum representable integer. However, the C# spec says that an exception "is" thrown in this
// case so optimizing this case would break C# code.
// A runtime check could be used to handle this case but it's probably too rare to matter.
return nullptr;
}
bool isDiv = divMod->OperGet() == GT_DIV;
if (isDiv)
{
if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN))
{
// If the divisor is the minimum representable integer value then we can use a compare,
// the result is 1 iff the dividend equals divisor.
divMod->SetOper(GT_EQ);
return node;
}
}
size_t absDivisorValue =
(divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));
if (!isPow2(absDivisorValue))
{
if (comp->opts.MinOpts())
{
return nullptr;
}
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
ssize_t magic;
int shift;
if (type == TYP_INT)
{
magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift);
}
else
{
#ifdef TARGET_64BIT
magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift);
#else // !TARGET_64BIT
unreached();
#endif // !TARGET_64BIT
}
divisor->AsIntConCommon()->SetIconValue(magic);
// Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node.
// The existing node will later be transformed into a GT_ADD/GT_SUB that
// computes the final result. This way we don't need to find and change the
// use of the existing node.
GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend);
BlockRange().InsertBefore(divMod, mulhi);
// mulhi was the easy part. Now we need to generate different code depending
// on the divisor value:
// For 3 we need:
// div = signbit(mulhi) + mulhi
// For 5 we need:
// div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
// For 7 we need:
// mulhi += dividend ; requires add adjust
// div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust
// For -3 we need:
// mulhi -= dividend ; requires sub adjust
// div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
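// A concrete example (illustrative, using the well-known 32-bit magic constant for 3, where
// magic = 0x55555556 and shift = 0):
// x = 7: mulhi(7, 0x55555556) = 2, signbit = 0, div = 2 + 0 = 2
// x = -7: mulhi(-7, 0x55555556) = -3, signbit = 1, div = -3 + 1 = -2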
bool requiresAddSubAdjust = signum(divisorValue) != signum(magic);
bool requiresShiftAdjust = shift != 0;
bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv;
if (requiresDividendMultiuse)
{
LIR::Use dividendUse(BlockRange(), &mulhi->AsOp()->gtOp2, mulhi);
dividend = ReplaceWithLclVar(dividendUse);
}
GenTree* adjusted;
if (requiresAddSubAdjust)
{
dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
adjusted = comp->gtNewOperNode(divisorValue > 0 ? GT_ADD : GT_SUB, type, mulhi, dividend);
BlockRange().InsertBefore(divMod, dividend, adjusted);
}
else
{
adjusted = mulhi;
}
GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type);
GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy);
BlockRange().InsertBefore(divMod, shiftBy, signBit);
LIR::Use adjustedUse(BlockRange(), &signBit->AsOp()->gtOp1, signBit);
adjusted = ReplaceWithLclVar(adjustedUse);
adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet());
BlockRange().InsertBefore(divMod, adjusted);
if (requiresShiftAdjust)
{
shiftBy = comp->gtNewIconNode(shift, TYP_INT);
adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy);
BlockRange().InsertBefore(divMod, shiftBy, adjusted);
}
if (isDiv)
{
divMod->SetOperRaw(GT_ADD);
divMod->AsOp()->gtOp1 = adjusted;
divMod->AsOp()->gtOp2 = signBit;
}
else
{
GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit);
dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
// dividend % divisor = dividend - divisor x div
GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor);
BlockRange().InsertBefore(divMod, dividend, div, divisor, mul);
divMod->SetOperRaw(GT_SUB);
divMod->AsOp()->gtOp1 = dividend;
divMod->AsOp()->gtOp2 = mul;
}
return mulhi;
#elif defined(TARGET_ARM)
// Currently there's no GT_MULHI for ARM32
return nullptr;
#else
#error Unsupported or unset target architecture
#endif
}
// We're committed to the conversion now. Go find the use if any.
LIR::Use use;
if (!BlockRange().TryGetUse(node, &use))
{
return nullptr;
}
// We need to use the dividend node multiple times so its value needs to be
// computed once and stored in a temp variable.
LIR::Use opDividend(BlockRange(), &divMod->AsOp()->gtOp1, divMod);
dividend = ReplaceWithLclVar(opDividend);
GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63));
if (absDivisorValue == 2)
{
// If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1.
// We can get the same result by using GT_RSZ instead of GT_RSH.
adjustment->SetOper(GT_RSZ);
}
else
{
adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type));
}
GenTree* adjustedDividend =
comp->gtNewOperNode(GT_ADD, type, adjustment,
comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()));
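// Illustrative example of the sequence built here, for x / 4 with TYP_INT:
// adjustment = (x >> 31) & 3 and adjustedDividend = x + adjustment, so
// x = -7: adjustment = 3, adjustedDividend = -4, and the RSH by 2 below yields -1 == trunc(-7/4);
// x = 7: adjustment = 0, and the result is 7 >> 2 = 1.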
GenTree* newDivMod;
if (isDiv)
{
// perform the division by right shifting the adjusted dividend
divisor->AsIntCon()->SetIconValue(genLog2(absDivisorValue));
newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor);
ContainCheckShiftRotate(newDivMod->AsOp());
if (divisorValue < 0)
{
// negate the result if the divisor is negative
newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod);
ContainCheckNode(newDivMod);
}
}
else
{
// dividend % divisor = dividend - divisor x (dividend / divisor)
// divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor)
// which simply discards the low log2(divisor) bits, that's just dividend & ~(divisor - 1)
divisor->AsIntCon()->SetIconValue(~(absDivisorValue - 1));
newDivMod = comp->gtNewOperNode(GT_SUB, type,
comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()),
comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor));
}
// Remove the divisor and dividend nodes from the linear order,
// since we have reused them and will resequence the tree
BlockRange().Remove(divisor);
BlockRange().Remove(dividend);
// linearize and insert the new tree before the original divMod node
InsertTreeBeforeAndContainCheck(divMod, newDivMod);
BlockRange().Remove(divMod);
// replace the original divmod node with the new divmod tree
use.ReplaceWith(newDivMod);
return newDivMod->gtNext;
}
//------------------------------------------------------------------------
// LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2
// const divisor into equivalent but faster sequences.
//
// Arguments:
// node - the DIV or MOD node
//
// Returns:
// The next node to lower.
//
GenTree* Lowering::LowerSignedDivOrMod(GenTree* node)
{
assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
GenTree* next = node->gtNext;
if (varTypeIsIntegral(node->TypeGet()))
{
// LowerConstIntDivOrMod will return nullptr if it doesn't transform the node.
GenTree* newNode = LowerConstIntDivOrMod(node);
if (newNode != nullptr)
{
return newNode;
}
}
ContainCheckDivOrMod(node->AsOp());
return next;
}
//------------------------------------------------------------------------
// LowerShift: Lower shift nodes
//
// Arguments:
// shift - the shift node (GT_LSH, GT_RSH or GT_RSZ)
//
// Notes:
// Remove unnecessary shift count masking; xarch shift instructions
// mask the shift count to 5 bits (or 6 bits for 64-bit operations).
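//
// For example (illustrative): for IR like LSH(x, AND(y, 31)) with a 32-bit shift, the AND can be
// removed because the hardware shift only uses the low 5 bits of the count anyway (low 6 bits for
// 64-bit shifts), so the lowered tree becomes LSH(x, y).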
void Lowering::LowerShift(GenTreeOp* shift)
{
assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ));
size_t mask = 0x1f;
#ifdef TARGET_64BIT
if (varTypeIsLong(shift->TypeGet()))
{
mask = 0x3f;
}
#else
assert(!varTypeIsLong(shift->TypeGet()));
#endif
for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1())
{
GenTree* maskOp = andOp->gtGetOp2();
if (!maskOp->IsCnsIntOrI())
{
break;
}
if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask)
{
break;
}
shift->gtOp2 = andOp->gtGetOp1();
BlockRange().Remove(andOp);
BlockRange().Remove(maskOp);
// The parent was replaced, clear contain and regOpt flag.
shift->gtOp2->ClearContained();
}
ContainCheckShiftRotate(shift);
#ifdef TARGET_ARM64
// Try to recognize ubfiz/sbfiz idiom in LSH(CAST(X), CNS) tree
if (comp->opts.OptimizationEnabled() && shift->OperIs(GT_LSH) && shift->gtGetOp1()->OperIs(GT_CAST) &&
shift->gtGetOp2()->IsCnsIntOrI() && !shift->isContained())
{
GenTreeIntCon* cns = shift->gtGetOp2()->AsIntCon();
GenTreeCast* cast = shift->gtGetOp1()->AsCast();
if (!cast->isContained() && !cast->IsRegOptional() && !cast->gtOverflow() &&
// Smaller CastOp is most likely an IND(X) node which is lowered to a zero-extend load
cast->CastOp()->TypeIs(TYP_LONG, TYP_INT))
{
// Cast is either "TYP_LONG <- TYP_INT" or "TYP_INT <- %SMALL_INT% <- TYP_INT" (signed or unsigned)
unsigned dstBits = genTypeSize(cast) * BITS_PER_BYTE;
unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE
: genTypeSize(cast->CastOp()) * BITS_PER_BYTE;
assert(!cast->CastOp()->isContained());
// It has to be an upcast and CNS must be in [1..srcBits) range
if ((srcBits < dstBits) && (cns->IconValue() > 0) && (cns->IconValue() < srcBits))
{
JITDUMP("Recognized ubfix/sbfix pattern in LSH(CAST, CNS). Changing op to GT_BFIZ");
shift->ChangeOper(GT_BFIZ);
MakeSrcContained(shift, cast);
}
}
}
#endif
}
void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node)
{
#ifdef FEATURE_SIMD
if (node->TypeGet() == TYP_SIMD12)
{
// Assumption 1:
// RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
// to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
// reading and writing purposes.
//
// Assumption 2:
// RyuJit backend is making another implicit assumption that Vector3 type args when passed in
// registers or on stack, the upper most 4-bytes will be zero.
//
// For P/Invoke return and Reverse P/Invoke argument passing, the native compiler doesn't guarantee
// that the upper 4 bytes of a Vector3 type struct are zero initialized, and hence assumption 2 is
// invalid.
//
// RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
// bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
// passes it retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
// there is no need to clear upper 4-bytes of Vector3 type args.
//
// RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
// Vector3 return values are returned in two return registers and the caller assembles them into a
// single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4 bytes of Vector3
// type args in the prolog and of the Vector3 type return value of a call.
//
// RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments
// are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed
// as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear
// it either.
LclVarDsc* varDsc = comp->lvaGetDesc(node->AsLclVarCommon());
if (comp->lvaMapSimd12ToSimd16(varDsc))
{
JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n");
DISPNODE(node);
JITDUMP("============");
node->gtType = TYP_SIMD16;
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// LowerArrElem: Lower a GT_ARR_ELEM node
//
// Arguments:
// node - the GT_ARR_ELEM node to lower.
//
// Return Value:
// The next node to lower.
//
// Assumptions:
// pTree points to a pointer to a GT_ARR_ELEM node.
//
// Notes:
// This performs the following lowering. We start with a node of the form:
// /--* <arrObj>
// +--* <index0>
// +--* <index1>
// /--* arrMD&[,]
//
// First, we create temps for arrObj if it is not already a lclVar, and for any of the index
// expressions that have side-effects.
// We then transform the tree into:
// <offset is null - no accumulated offset for the first index>
// /--* <arrObj>
// +--* <index0>
// /--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// +--* lclVar NewTemp
// /--* lea (scale = element size, offset = offset of first element)
//
// The new stmtExpr may be omitted if the <arrObj> is a lclVar.
// The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for
// the statement containing the original arrMD.
// Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second
// reference to NewTemp), because that provides more accurate lifetimes.
// There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively.
//
GenTree* Lowering::LowerArrElem(GenTree* node)
{
// This will assert if we don't have an ArrElem node
GenTreeArrElem* arrElem = node->AsArrElem();
const unsigned char rank = arrElem->gtArrRank;
JITDUMP("Lowering ArrElem\n");
JITDUMP("============\n");
DISPTREERANGE(BlockRange(), arrElem);
JITDUMP("\n");
assert(arrElem->gtArrObj->TypeGet() == TYP_REF);
// We need to have the array object in a lclVar.
if (!arrElem->gtArrObj->IsLocal())
{
LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem);
ReplaceWithLclVar(arrObjUse);
}
GenTree* arrObjNode = arrElem->gtArrObj;
assert(arrObjNode->IsLocal());
GenTree* insertionPoint = arrElem;
// The first ArrOffs node will have 0 for the offset of the previous dimension.
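// For a two-dimensional access a[i, j], this loop builds ARR_INDEX/ARR_OFFSET pairs for i and then j,
// each ARR_OFFSET consuming the previous dimension's offset, before the final scaled LEA is created below.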
GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
BlockRange().InsertBefore(insertionPoint, prevArrOffs);
GenTree* nextToLower = prevArrOffs;
for (unsigned char dim = 0; dim < rank; dim++)
{
GenTree* indexNode = arrElem->gtArrInds[dim];
// Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones.
GenTree* idxArrObjNode;
if (dim == 0)
{
idxArrObjNode = arrObjNode;
}
else
{
idxArrObjNode = comp->gtClone(arrObjNode);
BlockRange().InsertBefore(insertionPoint, idxArrObjNode);
}
// Next comes the GT_ARR_INDEX node.
GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX)
GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElemType);
arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT);
BlockRange().InsertBefore(insertionPoint, arrMDIdx);
GenTree* offsArrObjNode = comp->gtClone(arrObjNode);
BlockRange().InsertBefore(insertionPoint, offsArrObjNode);
GenTreeArrOffs* arrOffs = new (comp, GT_ARR_OFFSET)
GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank, arrElem->gtArrElemType);
arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT);
BlockRange().InsertBefore(insertionPoint, arrOffs);
prevArrOffs = arrOffs;
}
// Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the
// base.
unsigned scale = arrElem->gtArrElemSize;
unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrRank);
GenTree* leaIndexNode = prevArrOffs;
if (!jitIsScaleIndexMul(scale))
{
// We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are
// TYP_INT
GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale);
GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode);
BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode);
leaIndexNode = mulNode;
scale = 1;
}
GenTree* leaBase = comp->gtClone(arrObjNode);
BlockRange().InsertBefore(insertionPoint, leaBase);
GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset);
BlockRange().InsertBefore(insertionPoint, leaNode);
LIR::Use arrElemUse;
if (BlockRange().TryGetUse(arrElem, &arrElemUse))
{
arrElemUse.ReplaceWith(leaNode);
}
else
{
leaNode->SetUnusedValue();
}
BlockRange().Remove(arrElem);
JITDUMP("Results of lowering ArrElem:\n");
DISPTREERANGE(BlockRange(), leaNode);
JITDUMP("\n\n");
return nextToLower;
}
PhaseStatus Lowering::DoPhase()
{
// If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the
// appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke
// data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodProlog();
}
#if !defined(TARGET_64BIT)
DecomposeLongs decomp(comp); // Initialize the long decomposition class.
if (comp->compLongUsed)
{
decomp.PrepareForDecomposition();
}
#endif // !defined(TARGET_64BIT)
if (!comp->compEnregLocals())
{
// Lowering is checking if lvDoNotEnregister is already set for contained optimizations.
// If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`)
// then we already know that we won't enregister any locals and it is better to set
// `lvDoNotEnregister` flag before we start reading it.
// The main reason why this flag is not set is that we are running in minOpts.
comp->lvSetMinOptsDoNotEnreg();
}
for (BasicBlock* const block : comp->Blocks())
{
/* Make the block publicly available */
comp->compCurBB = block;
#if !defined(TARGET_64BIT)
if (comp->compLongUsed)
{
decomp.DecomposeBlock(block);
}
#endif //! TARGET_64BIT
LowerBlock(block);
}
#ifdef DEBUG
JITDUMP("Lower has completed modifying nodes.\n");
if (VERBOSE)
{
comp->fgDispBasicBlocks(true);
}
#endif
// Recompute local var ref counts before potentially sorting for liveness.
// Note this does minimal work in cases where we are not going to sort.
const bool isRecompute = true;
const bool setSlotNumbers = false;
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
comp->fgLocalVarLiveness();
// local var liveness can delete code, which may create empty blocks
if (comp->opts.OptimizationEnabled())
{
comp->optLoopsMarked = false;
bool modified = comp->fgUpdateFlowGraph();
if (modified)
{
JITDUMP("had to run another liveness pass:\n");
comp->fgLocalVarLiveness();
}
}
// Recompute local var ref counts again after liveness to reflect
// impact of any dead code removal. Note this may leave us with
// tracked vars that have zero refs.
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
return PhaseStatus::MODIFIED_EVERYTHING;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// Lowering::CheckCallArg: check that a call argument is in an expected
// form after lowering.
//
// Arguments:
// arg - the argument to check.
//
void Lowering::CheckCallArg(GenTree* arg)
{
if (!arg->IsValue() && !arg->OperIsPutArgStk())
{
assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
arg->OperIsCopyBlkOp());
return;
}
switch (arg->OperGet())
{
case GT_FIELD_LIST:
{
GenTreeFieldList* list = arg->AsFieldList();
assert(list->isContained());
for (GenTreeFieldList::Use& use : list->Uses())
{
assert(use.GetNode()->OperIsPutArg());
}
}
break;
default:
assert(arg->OperIsPutArg());
break;
}
}
//------------------------------------------------------------------------
// Lowering::CheckCall: check that a call is in an expected form after
// lowering. Currently this amounts to checking its
// arguments, but could be expanded to verify more
// properties in the future.
//
// Arguments:
// call - the call to check.
//
void Lowering::CheckCall(GenTreeCall* call)
{
if (call->gtCallThisArg != nullptr)
{
CheckCallArg(call->gtCallThisArg->GetNode());
}
for (GenTreeCall::Use& use : call->Args())
{
CheckCallArg(use.GetNode());
}
for (GenTreeCall::Use& use : call->LateArgs())
{
CheckCallArg(use.GetNode());
}
}
//------------------------------------------------------------------------
// Lowering::CheckNode: check that an LIR node is in an expected form
// after lowering.
//
// Arguments:
// compiler - the compiler context.
// node - the node to check.
//
void Lowering::CheckNode(Compiler* compiler, GenTree* node)
{
switch (node->OperGet())
{
case GT_CALL:
CheckCall(node->AsCall());
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
case GT_HWINTRINSIC:
assert(node->TypeGet() != TYP_SIMD12);
break;
#endif // FEATURE_SIMD
case GT_LCL_VAR:
case GT_STORE_LCL_VAR:
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar());
#if defined(FEATURE_SIMD) && defined(TARGET_64BIT)
if (node->TypeIs(TYP_SIMD12))
{
assert(compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc) || (varDsc->lvSize() == 12));
}
#endif // FEATURE_SIMD && TARGET_64BIT
if (varDsc->lvPromoted)
{
assert(varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegRet);
}
}
break;
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
{
const GenTreeLclVarCommon* lclVarAddr = node->AsLclVarCommon();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarAddr);
if (((lclVarAddr->gtFlags & GTF_VAR_DEF) != 0) && varDsc->HasGCPtr())
{
// Emitter does not correctly handle live updates for LCL_VAR_ADDR
// when they are not contained, for example, `STOREIND byref(GT_LCL_VAR_ADDR not-contained)`
// would generate:
// add r1, sp, 48 // r1 contains address of a lclVar V01.
// str r0, [r1] // a gc ref becomes live in V01, but emitter would not report it.
// Make sure that we use uncontained address nodes only for variables
// that will be marked as mustInit and will be alive throughout the whole block even when tracked.
assert(lclVarAddr->isContained() || !varDsc->lvTracked || varTypeIsStruct(varDsc));
// TODO: support this assert for uses, see https://github.com/dotnet/runtime/issues/51900.
}
assert(varDsc->lvDoNotEnregister);
break;
}
case GT_PHI:
case GT_PHI_ARG:
assert(!"Should not see phi nodes after rationalize");
break;
case GT_LCL_FLD:
case GT_STORE_LCL_FLD:
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclFld());
assert(varDsc->lvDoNotEnregister);
}
break;
default:
break;
}
}
//------------------------------------------------------------------------
// Lowering::CheckBlock: check that the contents of an LIR block are in an
// expected form after lowering.
//
// Arguments:
// compiler - the compiler context.
// block - the block to check.
//
bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block)
{
assert(block->isEmpty() || block->IsLIR());
LIR::Range& blockRange = LIR::AsRange(block);
for (GenTree* node : blockRange)
{
CheckNode(compiler, node);
}
assert(blockRange.CheckLIR(compiler, true));
return true;
}
#endif
//------------------------------------------------------------------------
// Lowering::LowerBlock: Lower all the nodes in a BasicBlock
//
// Arguments:
// block - the block to lower.
//
void Lowering::LowerBlock(BasicBlock* block)
{
assert(block == comp->compCurBB); // compCurBB must already be set.
assert(block->isEmpty() || block->IsLIR());
m_block = block;
// NOTE: some of the lowering methods insert calls before the node being
// lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In
// general, any code that is inserted before the current node should be
// "pre-lowered" as they won't be subject to further processing.
// Lowering::CheckBlock() runs some extra checks on call arguments in
// order to help catch unlowered nodes.
GenTree* node = BlockRange().FirstNode();
while (node != nullptr)
{
node = LowerNode(node);
}
assert(CheckBlock(comp, block));
}
/** Verifies if both of these trees represent the same indirection.
* Used by Lower to annotate whether CodeGen can generate an instruction of the
* form *addrMode BinOp= expr
*
* Preconditions: both trees are children of GT_INDs and their underlying children
* have the same gtOper.
*
* This is a first iteration to actually recognize trees that can be code-generated
* as a single read-modify-write instruction on AMD64/x86. For now
* this method only supports the recognition of simple addressing modes (through GT_LEA)
* or local var indirections. Local fields, array access and other more complex nodes are
* not yet supported.
*
* TODO-CQ: Perform tree recognition by using the Value Numbering Package, that way we can recognize
* arbitrary complex trees and support much more addressing patterns.
*/
bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd)
{
assert(candidate->OperGet() == GT_IND);
assert(storeInd->OperGet() == GT_STOREIND);
// We should check the size of the indirections. If they are
// different, say because of a cast, then we can't call them equivalent. Doing so could cause us
// to drop a cast.
// Signed-ness difference is okay and expected since a store indirection must always
// be signed based on the CIL spec, but a load could be unsigned.
if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType))
{
return false;
}
GenTree* pTreeA = candidate->gtGetOp1();
GenTree* pTreeB = storeInd->gtGetOp1();
// This method will be called by codegen (as well as during lowering).
// After register allocation, the sources may have been spilled and reloaded
// to a different register, indicated by an inserted GT_RELOAD node.
pTreeA = pTreeA->gtSkipReloadOrCopy();
pTreeB = pTreeB->gtSkipReloadOrCopy();
genTreeOps oper;
if (pTreeA->OperGet() != pTreeB->OperGet())
{
return false;
}
oper = pTreeA->OperGet();
switch (oper)
{
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_CLS_VAR_ADDR:
case GT_CNS_INT:
return NodesAreEquivalentLeaves(pTreeA, pTreeB);
case GT_LEA:
{
GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode();
GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode();
return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) &&
NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) &&
(gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset());
}
default:
// We don't handle anything that is not either a constant,
// a local var or LEA.
return false;
}
}
//------------------------------------------------------------------------
// NodesAreEquivalentLeaves: Check whether the two given nodes are the same leaves.
//
// Arguments:
// tree1 and tree2 are nodes to be checked.
// Return Value:
// Returns true if they are same leaves, false otherwise.
//
// static
bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2)
{
if (tree1 == tree2)
{
return true;
}
if (tree1 == nullptr || tree2 == nullptr)
{
return false;
}
tree1 = tree1->gtSkipReloadOrCopy();
tree2 = tree2->gtSkipReloadOrCopy();
if (tree1->TypeGet() != tree2->TypeGet())
{
return false;
}
if (tree1->OperGet() != tree2->OperGet())
{
return false;
}
if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf())
{
return false;
}
switch (tree1->OperGet())
{
case GT_CNS_INT:
return tree1->AsIntCon()->IconValue() == tree2->AsIntCon()->IconValue() &&
tree1->IsIconHandle() == tree2->IsIconHandle();
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
return tree1->AsLclVarCommon()->GetLclNum() == tree2->AsLclVarCommon()->GetLclNum();
case GT_CLS_VAR_ADDR:
return tree1->AsClsVar()->gtClsVarHnd == tree2->AsClsVar()->gtClsVarHnd;
default:
return false;
}
}
//------------------------------------------------------------------------
// Lowering::CheckMultiRegLclVar: Check whether a MultiReg GT_LCL_VAR node can
// remain a multi-reg.
//
// Arguments:
// lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR node.
// retTypeDesc - a return type descriptor either for a call source of a store of
// the local, or for the GT_RETURN consumer of the local.
//
// Notes:
// If retTypeDesc is non-null, this method will check that the fields are compatible.
// Otherwise, it will only check that the lclVar is independently promoted
// (i.e. it is marked lvPromoted and not lvDoNotEnregister).
//
bool Lowering::CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc)
{
bool canEnregister = false;
#if FEATURE_MULTIREG_RET
LclVarDsc* varDsc = comp->lvaGetDesc(lclNode->GetLclNum());
if ((comp->lvaEnregMultiRegVars) && varDsc->lvPromoted)
{
// We can enregister if we have a promoted struct and all the fields' types match the ABI requirements.
// Note that we don't promote structs with explicit layout, so we don't need to check field offsets, and
// if we have multiple types packed into a single register, we won't have matching reg and field counts,
// so we can tolerate mismatches of integer size.
if (varDsc->lvPromoted && (comp->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
// If we have no retTypeDesc, we only care that it is independently promoted.
if (retTypeDesc == nullptr)
{
canEnregister = true;
}
else
{
unsigned regCount = retTypeDesc->GetReturnRegCount();
if (regCount == varDsc->lvFieldCnt)
{
canEnregister = true;
}
}
}
}
#ifdef TARGET_XARCH
// For local stores on XARCH we only handle mismatched src/dest register count for
// calls of SIMD type. If the source was another lclVar similarly promoted, we would
// have broken it into multiple stores.
if (lclNode->OperIs(GT_STORE_LCL_VAR) && !lclNode->gtGetOp1()->OperIs(GT_CALL))
{
canEnregister = false;
}
#endif // TARGET_XARCH
if (canEnregister)
{
lclNode->SetMultiReg();
}
else
{
lclNode->ClearMultiReg();
if (varDsc->lvPromoted && !varDsc->lvDoNotEnregister)
{
comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp));
}
}
#endif
return canEnregister;
}
//------------------------------------------------------------------------
// Containment Analysis
//------------------------------------------------------------------------
void Lowering::ContainCheckNode(GenTree* node)
{
switch (node->gtOper)
{
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
ContainCheckStoreLoc(node->AsLclVarCommon());
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
case GT_JCMP:
ContainCheckCompare(node->AsOp());
break;
case GT_JTRUE:
ContainCheckJTrue(node->AsOp());
break;
case GT_ADD:
case GT_SUB:
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif
case GT_AND:
case GT_OR:
case GT_XOR:
ContainCheckBinary(node->AsOp());
break;
#if defined(TARGET_X86)
case GT_MUL_LONG:
#endif
case GT_MUL:
case GT_MULHI:
ContainCheckMul(node->AsOp());
break;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
ContainCheckDivOrMod(node->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
#ifndef TARGET_64BIT
case GT_LSH_HI:
case GT_RSH_LO:
#endif
ContainCheckShiftRotate(node->AsOp());
break;
case GT_ARR_OFFSET:
ContainCheckArrOffset(node->AsArrOffs());
break;
case GT_LCLHEAP:
ContainCheckLclHeap(node->AsOp());
break;
case GT_RETURN:
ContainCheckRet(node->AsOp());
break;
case GT_RETURNTRAP:
ContainCheckReturnTrap(node->AsOp());
break;
case GT_STOREIND:
ContainCheckStoreIndir(node->AsStoreInd());
break;
case GT_IND:
ContainCheckIndir(node->AsIndir());
break;
case GT_PUTARG_REG:
case GT_PUTARG_STK:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
// The regNum must have been set by the lowering of the call.
assert(node->GetRegNum() != REG_NA);
break;
#ifdef TARGET_XARCH
case GT_INTRINSIC:
ContainCheckIntrinsic(node->AsOp());
break;
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
case GT_SIMD:
ContainCheckSIMD(node->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
ContainCheckHWIntrinsic(node->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
}
//------------------------------------------------------------------------
// ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained.
//
// Arguments:
// node - pointer to the GT_RETURNTRAP node
//
void Lowering::ContainCheckReturnTrap(GenTreeOp* node)
{
#ifdef TARGET_XARCH
assert(node->OperIs(GT_RETURNTRAP));
// This just turns into a compare of its child with an int + a conditional call
if (node->gtOp1->isIndir())
{
MakeSrcContained(node, node->gtOp1);
}
#endif // TARGET_XARCH
}
//------------------------------------------------------------------------
// ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained.
//
// Arguments:
// node - pointer to the GT_ARR_OFFSET node
//
void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node)
{
assert(node->OperIs(GT_ARR_OFFSET));
// we don't want to generate code for this
if (node->gtOffset->IsIntegralConst(0))
{
MakeSrcContained(node, node->AsArrOffs()->gtOffset);
}
}
//------------------------------------------------------------------------
// ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckLclHeap(GenTreeOp* node)
{
assert(node->OperIs(GT_LCLHEAP));
GenTree* size = node->AsOp()->gtOp1;
if (size->IsCnsIntOrI())
{
MakeSrcContained(node, size);
}
}
//------------------------------------------------------------------------
// ContainCheckRet: determine whether the source of a node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckRet(GenTreeUnOp* ret)
{
assert(ret->OperIs(GT_RETURN));
#if !defined(TARGET_64BIT)
if (ret->TypeGet() == TYP_LONG)
{
GenTree* op1 = ret->gtGetOp1();
noway_assert(op1->OperGet() == GT_LONG);
MakeSrcContained(ret, op1);
}
#endif // !defined(TARGET_64BIT)
#if FEATURE_MULTIREG_RET
if (ret->TypeIs(TYP_STRUCT))
{
GenTree* op1 = ret->gtGetOp1();
// op1 must be either a lclvar or a multi-reg returning call
if (op1->OperGet() == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVarCommon());
// This must be a multi-reg return or an HFA of a single element.
assert(varDsc->lvIsMultiRegRet || (varDsc->lvIsHfa() && varTypeIsValidHfaType(varDsc->lvType)));
// Mark var as contained if not enregisterable.
if (!varDsc->IsEnregisterableLcl())
{
if (!op1->IsMultiRegLclVar())
{
MakeSrcContained(ret, op1);
}
}
}
}
#endif // FEATURE_MULTIREG_RET
}
//------------------------------------------------------------------------
// ContainCheckJTrue: determine whether the source of a JTRUE should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckJTrue(GenTreeOp* node)
{
// The compare does not need to be generated into a register.
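// The relop only needs to set the CPU flags, which the JTRUE consumes directly
// (e.g. a cmp followed by a conditional jump), so no register holds the comparison result.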
GenTree* cmp = node->gtGetOp1();
cmp->gtType = TYP_VOID;
cmp->gtFlags |= GTF_SET_FLAGS;
}
//------------------------------------------------------------------------
// ContainCheckBitCast: determine whether the source of a BITCAST should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckBitCast(GenTree* node)
{
GenTree* const op1 = node->AsOp()->gtOp1;
if (op1->isMemoryOp())
{
op1->SetContained();
}
else if (op1->OperIs(GT_LCL_VAR))
{
if (!m_lsra->willEnregisterLocalVars())
{
op1->SetContained();
}
const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVar());
// TODO-Cleanup: we want to check if the local is already known not
// to be on reg, for example, because local enreg is disabled.
if (varDsc->lvDoNotEnregister)
{
op1->SetContained();
}
else
{
op1->SetRegOptional();
}
}
else if (op1->IsLocal())
{
op1->SetContained();
}
}
//------------------------------------------------------------------------
// LowerStoreIndirCommon: a common logic to lower StoreIndir.
//
// Arguments:
// ind - the store indirection node we are lowering.
//
void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind)
{
assert(ind->TypeGet() != TYP_STRUCT);
#if defined(TARGET_ARM64)
// Verify containment safety before creating an LEA that must be contained.
//
const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
const bool isContainable = true;
#endif
TryCreateAddrMode(ind->Addr(), isContainable, ind);
if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(ind))
{
if (varTypeIsFloating(ind) && ind->Data()->IsCnsFltOrDbl())
{
// Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller.
GenTree* data = ind->Data();
double dblCns = data->AsDblCon()->gtDconVal;
ssize_t intCns = 0;
var_types type = TYP_UNKNOWN;
// XARCH: we can always contain the immediates.
// ARM64: zero can always be contained, other cases will use immediates from the data
// section and it is not a clear win to switch them to inline integers.
// ARM: FP constants are assembled from integral ones, so it is always profitable
// to directly use the integers as it avoids the int -> float conversion.
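// For example, "*p = 1.5f" can be rewritten as an integer store of the IEEE-754 bit pattern 0x3FC00000.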
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH) || defined(TARGET_ARM)
bool shouldSwitchToInteger = true;
#else // TARGET_ARM64
bool shouldSwitchToInteger = !data->IsCnsNonZeroFltOrDbl();
#endif
if (shouldSwitchToInteger)
{
if (ind->TypeIs(TYP_FLOAT))
{
float fltCns = static_cast<float>(dblCns); // should be a safe round-trip
intCns = static_cast<ssize_t>(*reinterpret_cast<INT32*>(&fltCns));
type = TYP_INT;
}
#ifdef TARGET_64BIT
else
{
assert(ind->TypeIs(TYP_DOUBLE));
intCns = static_cast<ssize_t>(*reinterpret_cast<INT64*>(&dblCns));
type = TYP_LONG;
}
#endif
}
if (type != TYP_UNKNOWN)
{
data->BashToConst(intCns, type);
ind->ChangeType(type);
}
}
LowerStoreIndir(ind);
}
}
//------------------------------------------------------------------------
// LowerIndir: a common logic to lower IND load or NullCheck.
//
// Arguments:
// ind - the ind node we are lowering.
//
void Lowering::LowerIndir(GenTreeIndir* ind)
{
assert(ind->OperIs(GT_IND, GT_NULLCHECK));
// Process struct typed indirs separately unless they are unused;
// they only appear as the source of a block copy operation or a return node.
if (!ind->TypeIs(TYP_STRUCT) || ind->IsUnusedValue())
{
// TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects
// address containment in some cases so we end up creating trivial (reg + offset)
// or (reg + reg) LEAs that are not necessary.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
// Verify containment safety before creating an LEA that must be contained.
//
const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
const bool isContainable = true;
#endif
TryCreateAddrMode(ind->Addr(), isContainable, ind);
ContainCheckIndir(ind);
if (ind->OperIs(GT_NULLCHECK) || ind->IsUnusedValue())
{
TransformUnusedIndirection(ind, comp, m_block);
}
}
else
{
// If the `ADDR` node under `STORE_OBJ(dstAddr, IND(struct(ADDR)))`
// is a complex one it could benefit from an `LEA` that is not contained.
const bool isContainable = false;
TryCreateAddrMode(ind->Addr(), isContainable, ind);
}
}
//------------------------------------------------------------------------
// TransformUnusedIndirection: change the opcode and the type of the unused indirection.
//
// Arguments:
// ind - Indirection to transform.
// comp - Compiler instance.
// block - Basic block of the indirection.
//
void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block)
{
// A nullcheck is essentially the same as an indirection with no use.
// The difference lies in whether a target register must be allocated.
// On XARCH we can generate a compare with no target register as long as the address
// is not contained.
// On ARM64 we can generate a load to REG_ZR in all cases.
// However, on ARM we must always generate a load to a register.
// In the case where we require a target register, it is better to use GT_IND, since
// GT_NULLCHECK is a non-value node and would therefore require an internal register
// to use as the target. That is non-optimal because it will be modeled as conflicting
// with the source register(s).
// So, to summarize:
// - On ARM64, always use GT_NULLCHECK for a dead indirection.
// - On ARM, always use GT_IND.
// - On XARCH, use GT_IND if we have a contained address, and GT_NULLCHECK otherwise.
// In all cases we try to preserve the original type and never make it wider to avoid AVEs.
// For structs we conservatively lower it to BYTE. For 8-byte primitives we lower it to TYP_INT
// on XARCH as an optimization.
//
assert(ind->OperIs(GT_NULLCHECK, GT_IND, GT_BLK, GT_OBJ));
ind->ChangeType(comp->gtTypeForNullCheck(ind));
#ifdef TARGET_ARM64
bool useNullCheck = true;
#elif defined(TARGET_ARM)
bool useNullCheck = false;
#else // TARGET_XARCH
bool useNullCheck = !ind->Addr()->isContained();
#endif // !TARGET_XARCH
if (useNullCheck && !ind->OperIs(GT_NULLCHECK))
{
comp->gtChangeOperToNullCheck(ind, block);
ind->ClearUnusedValue();
}
else if (!useNullCheck && !ind->OperIs(GT_IND))
{
ind->ChangeOper(GT_IND);
ind->SetUnusedValue();
}
}
//------------------------------------------------------------------------
// LowerBlockStoreCommon: a common logic to lower STORE_OBJ/BLK/DYN_BLK.
//
// Arguments:
// blkNode - the store blk/obj node we are lowering.
//
void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode)
{
assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ));
// Lose the type information stored in the source - we no longer need it.
if (blkNode->Data()->OperIs(GT_OBJ, GT_BLK))
{
blkNode->Data()->SetOper(GT_IND);
LowerIndir(blkNode->Data()->AsIndir());
}
if (TryTransformStoreObjAsStoreInd(blkNode))
{
return;
}
LowerBlockStore(blkNode);
}
//------------------------------------------------------------------------
// TryTransformStoreObjAsStoreInd: try to replace STORE_OBJ/BLK as STOREIND.
//
// Arguments:
// blkNode - the store node.
//
// Return value:
// true if the replacement was made, false otherwise.
//
// Notes:
// TODO-CQ: this method should do the transformation when possible
// and STOREIND should always generate better or the same code as
// STORE_OBJ/BLK for the same copy.
//
bool Lowering::TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode)
{
assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ));
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (blkNode->OperIs(GT_STORE_DYN_BLK))
{
return false;
}
ClassLayout* layout = blkNode->GetLayout();
if (layout == nullptr)
{
return false;
}
var_types regType = layout->GetRegisterType();
if (regType == TYP_UNDEF)
{
return false;
}
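// At this point the layout fits in a single register: e.g. a struct wrapping a single int field has
// regType TYP_INT, so the block store can usually be rewritten as a plain STOREIND of that type.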
GenTree* src = blkNode->Data();
if (varTypeIsSIMD(regType) && src->IsConstInitVal())
{
// TODO-CQ: support STORE_IND SIMD16(SIMD16, CNS_INT 0).
return false;
}
if (varTypeIsGC(regType))
{
// TODO-CQ: STOREIND does not try to contain src if we need a barrier,
// STORE_OBJ generates better code currently.
return false;
}
if (src->OperIsInitVal() && !src->IsConstInitVal())
{
return false;
}
if (varTypeIsSmall(regType) && !src->IsConstInitVal() && !src->IsLocal())
{
// source operand INDIR will use a widening instruction
// and generate worse code, like `movzx` instead of `mov`
// on x64.
return false;
}
JITDUMP("Replacing STORE_OBJ with STOREIND for [%06u]\n", blkNode->gtTreeID);
blkNode->ChangeOper(GT_STOREIND);
blkNode->ChangeType(regType);
if ((blkNode->gtFlags & GTF_IND_TGT_NOT_HEAP) == 0)
{
blkNode->gtFlags |= GTF_IND_TGTANYWHERE;
}
if (varTypeIsStruct(src))
{
src->ChangeType(regType);
LowerNode(blkNode->Data());
}
else if (src->OperIsInitVal())
{
GenTreeUnOp* initVal = src->AsUnOp();
src = src->gtGetOp1();
assert(src->IsCnsIntOrI());
src->AsIntCon()->FixupInitBlkValue(regType);
blkNode->SetData(src);
BlockRange().Remove(initVal);
}
else
{
assert(src->TypeIs(regType) || src->IsCnsIntOrI() || src->IsCall());
}
LowerStoreIndirCommon(blkNode->AsStoreInd());
return true;
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------------------
// Lowering::LowerSIMD: Perform containment analysis for a SIMD intrinsic node.
//
// Arguments:
// simdNode - The SIMD intrinsic node.
//
void Lowering::LowerSIMD(GenTreeSIMD* simdNode)
{
if (simdNode->TypeGet() == TYP_SIMD12)
{
// GT_SIMD node requiring to produce TYP_SIMD12 in fact
// produces a TYP_SIMD16 result
simdNode->gtType = TYP_SIMD16;
}
if (simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInitN)
{
assert(simdNode->GetSimdBaseType() == TYP_FLOAT);
size_t argCount = simdNode->GetOperandCount();
size_t constArgCount = 0;
float constArgValues[4]{0, 0, 0, 0};
for (GenTree* arg : simdNode->Operands())
{
assert(arg->TypeIs(simdNode->GetSimdBaseType()));
if (arg->IsCnsFltOrDbl())
{
constArgValues[constArgCount] = static_cast<float>(arg->AsDblCon()->gtDconVal);
constArgCount++;
}
}
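// If every element is a constant, the whole SIMDIntrinsicInitN can be folded into a load from a
// read-only 16-byte data constant, e.g. Vector4(1, 2, 3, 4) becomes IND(CLS_VAR_ADDR).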
if (constArgCount == argCount)
{
for (GenTree* arg : simdNode->Operands())
{
BlockRange().Remove(arg);
}
assert(sizeof(constArgValues) == 16);
unsigned cnsSize = sizeof(constArgValues);
unsigned cnsAlign = (comp->compCodeOpt() != Compiler::SMALL_CODE) ? cnsSize : 1;
CORINFO_FIELD_HANDLE hnd =
comp->GetEmitter()->emitBlkConst(constArgValues, cnsSize, cnsAlign, simdNode->GetSimdBaseType());
GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr);
BlockRange().InsertBefore(simdNode, clsVarAddr);
simdNode->ChangeOper(GT_IND);
simdNode->AsOp()->gtOp1 = clsVarAddr;
ContainCheckIndir(simdNode->AsIndir());
return;
}
}
ContainCheckSIMD(simdNode);
}
#endif // FEATURE_SIMD
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Lower XX
XX XX
XX Preconditions: XX
XX XX
XX Postconditions (for the nodes currently handled): XX
XX - All operands requiring a register are explicit in the graph XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "lower.h"
#if !defined(TARGET_64BIT)
#include "decomposelongs.h"
#endif // !defined(TARGET_64BIT)
//------------------------------------------------------------------------
// MakeSrcContained: Make "childNode" a contained node
//
// Arguments:
// parentNode - is a non-leaf node that can contain its 'childNode'
// childNode - is an op that will now be contained by its parent.
//
// Notes:
// If 'childNode' has any existing sources, they will now be sources for the parent.
//
void Lowering::MakeSrcContained(GenTree* parentNode, GenTree* childNode) const
{
assert(!parentNode->OperIsLeaf());
assert(childNode->canBeContained());
childNode->SetContained();
assert(childNode->isContained());
#ifdef DEBUG
if (IsContainableMemoryOp(childNode))
{
// Verify caller of this method checked safety.
//
const bool isSafeToContainMem = IsSafeToContainMem(parentNode, childNode);
if (!isSafeToContainMem)
{
JITDUMP("** Unsafe mem containment of [%06u] in [%06u}, comp->dspTreeID(childNode), "
"comp->dspTreeID(parentNode)\n");
assert(isSafeToContainMem);
}
}
#endif
}
//------------------------------------------------------------------------
// CheckImmedAndMakeContained: Checks if the 'childNode' is a containable immediate
// and, if so, makes it contained.
//
// Arguments:
// parentNode - is any non-leaf node
// childNode - is an child op of 'parentNode'
//
// Return value:
// true if we are able to make childNode a contained immediate
//
bool Lowering::CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode)
{
assert(!parentNode->OperIsLeaf());
// If childNode is a containable immediate
if (IsContainableImmed(parentNode, childNode))
{
// then make it contained within the parentNode
MakeSrcContained(parentNode, childNode);
return true;
}
return false;
}
//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and parentNode,
// and returns 'true' iff memory operand childNode can be contained in parentNode.
//
// Arguments:
// parentNode - any non-leaf node
// childNode - some node that is an input to `parentNode`
//
// Return value:
// true if it is safe to make childNode a contained memory operand.
//
bool Lowering::IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const
{
// Quick early-out for unary cases
//
if (childNode->gtNext == parentNode)
{
return true;
}
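// Otherwise, walk every node between childNode and parentNode in execution order and make sure
// none of them interferes with childNode's side effects; if one does, delaying childNode's
// evaluation until the parent executes could reorder observable effects.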
m_scratchSideEffects.Clear();
m_scratchSideEffects.AddNode(comp, childNode);
for (GenTree* node = childNode->gtNext; node != parentNode; node = node->gtNext)
{
const bool strict = true;
if (m_scratchSideEffects.InterferesWith(comp, node, strict))
{
return false;
}
}
return true;
}
//------------------------------------------------------------------------
// IsSafeToContainMem: Checks for conflicts between childNode and grandParentNode
// and returns 'true' iff memory operand childNode can be contained in ancestorNode
//
// Arguments:
// grandParentNode - any non-leaf node
// parentNode - parent of `childNode` and an input to `grandParentNode`
// childNode - some node that is an input to `parentNode`
//
// Return value:
// true if it is safe to make childNode a contained memory operand.
//
bool Lowering::IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const
{
m_scratchSideEffects.Clear();
m_scratchSideEffects.AddNode(comp, childNode);
for (GenTree* node = childNode->gtNext; node != grandparentNode; node = node->gtNext)
{
if (node == parentNode)
{
continue;
}
const bool strict = true;
if (m_scratchSideEffects.InterferesWith(comp, node, strict))
{
return false;
}
}
return true;
}
//------------------------------------------------------------------------
// LowerNode: this is the main entry point for Lowering.
//
// Arguments:
// node - the node we are lowering.
//
// Returns:
// next node in the transformed node sequence that needs to be lowered.
//
GenTree* Lowering::LowerNode(GenTree* node)
{
assert(node != nullptr);
switch (node->gtOper)
{
case GT_NULLCHECK:
case GT_IND:
LowerIndir(node->AsIndir());
break;
case GT_STOREIND:
LowerStoreIndirCommon(node->AsStoreInd());
break;
case GT_ADD:
{
GenTree* next = LowerAdd(node->AsOp());
if (next != nullptr)
{
return next;
}
}
break;
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif
case GT_SUB:
case GT_AND:
case GT_OR:
case GT_XOR:
return LowerBinaryArithmetic(node->AsOp());
case GT_MUL:
case GT_MULHI:
#if defined(TARGET_X86) || defined(TARGET_ARM64)
case GT_MUL_LONG:
#endif
return LowerMul(node->AsOp());
case GT_UDIV:
case GT_UMOD:
if (!LowerUnsignedDivOrMod(node->AsOp()))
{
ContainCheckDivOrMod(node->AsOp());
}
break;
case GT_DIV:
case GT_MOD:
return LowerSignedDivOrMod(node);
case GT_SWITCH:
return LowerSwitch(node);
case GT_CALL:
LowerCall(node);
break;
case GT_LT:
case GT_LE:
case GT_GT:
case GT_GE:
case GT_EQ:
case GT_NE:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
return LowerCompare(node);
case GT_JTRUE:
return LowerJTrue(node->AsOp());
case GT_JMP:
LowerJmpMethod(node);
break;
case GT_RETURN:
LowerRet(node->AsUnOp());
break;
case GT_RETURNTRAP:
ContainCheckReturnTrap(node->AsOp());
break;
case GT_CAST:
LowerCast(node);
break;
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_BOUNDS_CHECK:
ContainCheckBoundsChk(node->AsBoundsChk());
break;
#endif // TARGET_XARCH || TARGET_ARM64 || TARGET_LOONGARCH64
case GT_ARR_ELEM:
return LowerArrElem(node);
case GT_ARR_OFFSET:
ContainCheckArrOffset(node->AsArrOffs());
break;
case GT_ROL:
case GT_ROR:
LowerRotate(node);
break;
#ifndef TARGET_64BIT
case GT_LSH_HI:
case GT_RSH_LO:
ContainCheckShiftRotate(node->AsOp());
break;
#endif // !TARGET_64BIT
case GT_LSH:
case GT_RSH:
case GT_RSZ:
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
LowerShift(node->AsOp());
#else
ContainCheckShiftRotate(node->AsOp());
#endif
break;
case GT_STORE_BLK:
case GT_STORE_OBJ:
if (node->AsBlk()->Data()->IsCall())
{
LowerStoreSingleRegCallStruct(node->AsBlk());
break;
}
FALLTHROUGH;
case GT_STORE_DYN_BLK:
LowerBlockStoreCommon(node->AsBlk());
break;
case GT_LCLHEAP:
ContainCheckLclHeap(node->AsOp());
break;
#ifdef TARGET_XARCH
case GT_INTRINSIC:
ContainCheckIntrinsic(node->AsOp());
break;
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
case GT_SIMD:
LowerSIMD(node->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
LowerHWIntrinsic(node->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
case GT_LCL_FLD:
{
// We should only encounter this for lclVars that are lvDoNotEnregister.
verifyLclFldDoNotEnregister(node->AsLclVarCommon()->GetLclNum());
break;
}
case GT_LCL_VAR:
{
GenTreeLclVar* lclNode = node->AsLclVar();
WidenSIMD12IfNecessary(lclNode);
LclVarDsc* varDsc = comp->lvaGetDesc(lclNode);
// The consumer of this node must check compatibility of the fields.
// This merely checks whether it is possible for this to be a multireg node.
if (lclNode->IsMultiRegLclVar())
{
if (!varDsc->lvPromoted ||
(comp->lvaGetPromotionType(varDsc) != Compiler::PROMOTION_TYPE_INDEPENDENT) ||
(varDsc->lvFieldCnt > MAX_MULTIREG_COUNT))
{
lclNode->ClearMultiReg();
if (lclNode->TypeIs(TYP_STRUCT))
{
comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp));
}
}
}
break;
}
case GT_STORE_LCL_VAR:
WidenSIMD12IfNecessary(node->AsLclVarCommon());
FALLTHROUGH;
case GT_STORE_LCL_FLD:
LowerStoreLocCommon(node->AsLclVarCommon());
break;
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_CMPXCHG:
CheckImmedAndMakeContained(node, node->AsCmpXchg()->gtOpComparand);
break;
case GT_XORR:
case GT_XAND:
case GT_XADD:
CheckImmedAndMakeContained(node, node->AsOp()->gtOp2);
break;
#elif defined(TARGET_XARCH)
case GT_XORR:
case GT_XAND:
case GT_XADD:
if (node->IsUnusedValue())
{
node->ClearUnusedValue();
// Make sure the types are identical, since the node type is changed to VOID
// CodeGen relies on op2's type to determine the instruction size.
// Note that the node type cannot be a small int but the data operand can.
assert(genActualType(node->gtGetOp2()->TypeGet()) == node->TypeGet());
node->SetOper(GT_LOCKADD);
node->gtType = TYP_VOID;
CheckImmedAndMakeContained(node, node->gtGetOp2());
}
break;
#endif
#if !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64)
// TODO-ARMARCH-CQ: We should contain this as long as the offset fits.
case GT_OBJ:
if (node->AsObj()->Addr()->OperIsLocalAddr())
{
node->AsObj()->Addr()->SetContained();
}
break;
#endif // !TARGET_ARMARCH && !TARGET_LOONGARCH64
case GT_KEEPALIVE:
node->gtGetOp1()->SetRegOptional();
break;
case GT_LCL_FLD_ADDR:
case GT_LCL_VAR_ADDR:
{
const GenTreeLclVarCommon* lclAddr = node->AsLclVarCommon();
const LclVarDsc* varDsc = comp->lvaGetDesc(lclAddr);
if (!varDsc->lvDoNotEnregister)
{
// TODO-Cleanup: this is definitely not the best place for this detection,
// but for now it is the easiest. Move it to morph.
comp->lvaSetVarDoNotEnregister(lclAddr->GetLclNum() DEBUGARG(DoNotEnregisterReason::LclAddrNode));
}
}
break;
default:
break;
}
return node->gtNext;
}
/** -- Switch Lowering --
* The main idea of switch lowering is to keep the register requirements of this node transparent
* to LSRA downstream. Although the switch is represented in the JIT as a simple tree node, it is
* inherently a control statement: when we actually generate code for it, we end up emitting
* instructions that modify the flow of execution, which imposes complicated register requirements
* and lifetimes.
*
* So, for the purpose of LSRA, we want to have a more detailed specification of what a switch node actually
* means and more importantly, which and when do we need a register for each instruction we want to issue
* to correctly allocate them downstream.
*
* For this purpose, this procedure performs switch lowering in two different ways:
*
* a) Represent the switch statement as a zero-index jump table construct. This means that for every destination
* of the switch, we will store this destination in an array of addresses and the code generator will issue
* a data section where this array will live and will emit code that based on the switch index, will indirect and
* jump to the destination specified in the jump table.
*
* For this transformation we introduce a new GT node called GT_SWITCH_TABLE that is a specialization of the switch
* node for jump table based switches.
* The overall structure of a GT_SWITCH_TABLE is:
*
* GT_SWITCH_TABLE
* |_________ localVar (a temporary local that holds the switch index)
* |_________ jumpTable (this is a special node that holds the address of the jump table array)
*
* Now, the way we morph a GT_SWITCH node into this lowered switch table node form is the following:
*
* Input: GT_SWITCH (inside a basic block whose Branch Type is BBJ_SWITCH)
* |_____ expr (an arbitrarily complex GT_NODE that represents the switch index)
*
* This gets transformed into the following statements inside a BBJ_COND basic block (the target would be
* the default case of the switch in case the conditional is evaluated to true).
*
* ----- original block, transformed
* GT_STORE_LCL_VAR tempLocal (a new temporary local variable used to store the switch index)
* |_____ expr (the index expression)
*
* GT_JTRUE
* |_____ GT_COND
* |_____ GT_GE
* |___ Int_Constant (This constant is the index of the default case
* that happens to be the highest index in the jump table).
* |___ tempLocal (The local variable where we stored the index expression).
*
* ----- new basic block
* GT_SWITCH_TABLE
* |_____ tempLocal
* |_____ jumpTable (a new jump table node that now LSRA can allocate registers for explicitly
* and LinearCodeGen will be responsible to generate downstream).
*
* This way there are no implicit temporaries.
*
* b) For small-sized switches, we will actually morph them into a series of conditionals of the form
* if (case falls into the default){ goto jumpTable[size]; // last entry in the jump table is the default case }
* (For the default case conditional, we'll be constructing the exact same code as the jump table case one).
* else if (case == firstCase){ goto jumpTable[1]; }
* else if (case == secondCase) { goto jumptable[2]; } and so on.
*
* This transformation is of course made in JIT-IR, not downstream to CodeGen level, so this way we no longer
* require internal temporaries to maintain the index we're evaluating plus we're using existing code from
* LinearCodeGen to implement this instead of implement all the control flow constructs using InstrDscs and
* InstrGroups downstream.
*/
GenTree* Lowering::LowerSwitch(GenTree* node)
{
unsigned jumpCnt;
unsigned targetCnt;
BasicBlock** jumpTab;
assert(node->gtOper == GT_SWITCH);
// The first step is to build the default case conditional construct that is
// shared between both kinds of expansion of the switch node.
// To avoid confusion, we'll alias m_block to originalSwitchBB
// that represents the node we're morphing.
BasicBlock* originalSwitchBB = m_block;
LIR::Range& switchBBRange = LIR::AsRange(originalSwitchBB);
// jumpCnt is the number of elements in the jump table array.
// jumpTab is the actual pointer to the jump table array.
// targetCnt is the number of unique targets in the jump table array.
jumpCnt = originalSwitchBB->bbJumpSwt->bbsCount;
jumpTab = originalSwitchBB->bbJumpSwt->bbsDstTab;
targetCnt = originalSwitchBB->NumSucc(comp);
// GT_SWITCH must be a top-level node with no use.
#ifdef DEBUG
{
LIR::Use use;
assert(!switchBBRange.TryGetUse(node, &use));
}
#endif
JITDUMP("Lowering switch " FMT_BB ", %d cases\n", originalSwitchBB->bbNum, jumpCnt);
// Handle a degenerate case: if the switch has only a default case, just convert it
// to an unconditional branch. This should only happen in minopts or with debuggable
// code.
if (targetCnt == 1)
{
JITDUMP("Lowering switch " FMT_BB ": single target; converting to BBJ_ALWAYS\n", originalSwitchBB->bbNum);
noway_assert(comp->opts.OptimizationDisabled());
if (originalSwitchBB->bbNext == jumpTab[0])
{
originalSwitchBB->bbJumpKind = BBJ_NONE;
originalSwitchBB->bbJumpDest = nullptr;
}
else
{
originalSwitchBB->bbJumpKind = BBJ_ALWAYS;
originalSwitchBB->bbJumpDest = jumpTab[0];
}
// Remove extra predecessor links if there was more than one case.
for (unsigned i = 1; i < jumpCnt; ++i)
{
(void)comp->fgRemoveRefPred(jumpTab[i], originalSwitchBB);
}
// We have to get rid of the GT_SWITCH node but a child might have side effects so just assign
// the result of the child subtree to a temp.
GenTree* rhs = node->AsOp()->gtOp1;
unsigned lclNum = comp->lvaGrabTemp(true DEBUGARG("Lowering is creating a new local variable"));
comp->lvaTable[lclNum].lvType = rhs->TypeGet();
GenTreeLclVar* store = comp->gtNewStoreLclVar(lclNum, rhs);
switchBBRange.InsertAfter(node, store);
switchBBRange.Remove(node);
return store;
}
noway_assert(jumpCnt >= 2);
// Spill the argument to the switch node into a local so that it can be used later.
LIR::Use use(switchBBRange, &(node->AsOp()->gtOp1), node);
ReplaceWithLclVar(use);
// GT_SWITCH(indexExpression) is now two statements:
// 1. a statement containing 'asg' (for temp = indexExpression)
// 2. and a statement with GT_SWITCH(temp)
assert(node->gtOper == GT_SWITCH);
GenTree* temp = node->AsOp()->gtOp1;
assert(temp->gtOper == GT_LCL_VAR);
unsigned tempLclNum = temp->AsLclVarCommon()->GetLclNum();
var_types tempLclType = temp->TypeGet();
BasicBlock* defaultBB = jumpTab[jumpCnt - 1];
BasicBlock* followingBB = originalSwitchBB->bbNext;
/* Is the number of cases right for a test and jump switch? */
const bool fFirstCaseFollows = (followingBB == jumpTab[0]);
const bool fDefaultFollows = (followingBB == defaultBB);
unsigned minSwitchTabJumpCnt = 2; // table is better than just 2 cmp/jcc
// This means really just a single cmp/jcc (aka a simple if/else)
if (fFirstCaseFollows || fDefaultFollows)
{
minSwitchTabJumpCnt++;
}
#if defined(TARGET_ARM)
// On ARM for small switch tables we will
// generate a sequence of compare and branch instructions
// because the code to load the base of the switch
// table is huge and hideous due to the relocation... :(
minSwitchTabJumpCnt += 2;
#endif // TARGET_ARM
// Once we have the temporary variable, we construct the conditional branch for
// the default case. As stated above, this conditional is being shared between
// both GT_SWITCH lowering code paths.
// This condition is of the form: if (temp > jumpTableLength - 2){ goto jumpTable[jumpTableLength - 1]; }
GenTree* gtDefaultCaseCond = comp->gtNewOperNode(GT_GT, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
comp->gtNewIconNode(jumpCnt - 2, genActualType(tempLclType)));
// Make sure we perform an unsigned comparison, just in case the switch index in 'temp'
// is now less than zero (that would also hit the default case).
gtDefaultCaseCond->gtFlags |= GTF_UNSIGNED;
GenTree* gtDefaultCaseJump = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtDefaultCaseCond);
gtDefaultCaseJump->gtFlags = node->gtFlags;
LIR::Range condRange = LIR::SeqTree(comp, gtDefaultCaseJump);
switchBBRange.InsertAtEnd(std::move(condRange));
BasicBlock* afterDefaultCondBlock = comp->fgSplitBlockAfterNode(originalSwitchBB, condRange.LastNode());
// afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor.
// originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock
// representing the fall-through flow from originalSwitchBB.
assert(originalSwitchBB->bbJumpKind == BBJ_NONE);
assert(originalSwitchBB->bbNext == afterDefaultCondBlock);
assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH);
assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault);
assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet.
// The GT_SWITCH code is still in originalSwitchBB (it will be removed later).
// Turn originalSwitchBB into a BBJ_COND.
originalSwitchBB->bbJumpKind = BBJ_COND;
originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1];
// Fix the pred for the default case: the default block target still has originalSwitchBB
// as a predecessor, but the fgSplitBlockAfterNode() call above moved all predecessors to point
// to afterDefaultCondBlock.
flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[jumpCnt - 1], afterDefaultCondBlock);
comp->fgAddRefPred(jumpTab[jumpCnt - 1], originalSwitchBB, oldEdge);
bool useJumpSequence = jumpCnt < minSwitchTabJumpCnt;
if (TargetOS::IsUnix && TargetArchitecture::IsArm32)
{
// Force the use of an inlined jump sequence instead of switch table generation.
// The switch jump table is generated with incorrect values in the CoreRT case,
// so any large switch will crash after loading such a value into the PC.
// This is likely because we use absolute addressing here, whereas CoreRT
// generally uses relative addressing when generating an executable.
// See also https://github.com/dotnet/runtime/issues/8683
// Also https://github.com/dotnet/coreclr/pull/13197
useJumpSequence = useJumpSequence || comp->IsTargetAbi(CORINFO_CORERT_ABI);
}
// If we originally had 2 unique successors, check to see whether there is a unique
// non-default case, in which case we can eliminate the switch altogether.
// Note that the single unique successor case is handled above.
BasicBlock* uniqueSucc = nullptr;
if (targetCnt == 2)
{
uniqueSucc = jumpTab[0];
noway_assert(jumpCnt >= 2);
for (unsigned i = 1; i < jumpCnt - 1; i++)
{
if (jumpTab[i] != uniqueSucc)
{
uniqueSucc = nullptr;
break;
}
}
}
if (uniqueSucc != nullptr)
{
// If the unique successor immediately follows this block, we have nothing to do -
// it will simply fall-through after we remove the switch, below.
// Otherwise, make this a BBJ_ALWAYS.
// Now, fixup the predecessor links to uniqueSucc. In the original jumpTab:
// jumpTab[i-1] was the default target, which we handled above,
// jumpTab[0] is the first target, and we'll leave that predecessor link.
// Remove any additional predecessor links to uniqueSucc.
for (unsigned i = 1; i < jumpCnt - 1; ++i)
{
assert(jumpTab[i] == uniqueSucc);
(void)comp->fgRemoveRefPred(uniqueSucc, afterDefaultCondBlock);
}
if (afterDefaultCondBlock->bbNext == uniqueSucc)
{
afterDefaultCondBlock->bbJumpKind = BBJ_NONE;
afterDefaultCondBlock->bbJumpDest = nullptr;
}
else
{
afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS;
afterDefaultCondBlock->bbJumpDest = uniqueSucc;
}
}
// If the number of possible destinations is small enough, we proceed to expand the switch
// into a series of conditional branches, otherwise we follow the jump table based switch
// transformation.
else if (useJumpSequence || comp->compStressCompile(Compiler::STRESS_SWITCH_CMP_BR_EXPANSION, 50))
{
// Lower the switch into a series of compare and branch IR trees.
//
// In this case we will morph the node in the following way:
// 1. Generate a JTRUE statement to evaluate the default case. (This happens above.)
// 2. Start splitting the switch basic block into subsequent basic blocks, each of which will contain
// a statement that is responsible for performing a comparison of the table index and conditional
// branch if equal.
JITDUMP("Lowering switch " FMT_BB ": using compare/branch expansion\n", originalSwitchBB->bbNum);
// We'll use 'afterDefaultCondBlock' for the first conditional. After that, we'll add new
// blocks. If we end up not needing it at all (say, if all the non-default cases just fall through),
// we'll delete it.
bool fUsedAfterDefaultCondBlock = false;
BasicBlock* currentBlock = afterDefaultCondBlock;
LIR::Range* currentBBRange = &LIR::AsRange(currentBlock);
// Walk the non-default entries 0 to jumpCnt - 2. If a case target follows, ignore it and let it fall through.
// If no case target follows, the last one doesn't need to be a compare/branch: it can be an
// unconditional branch.
bool fAnyTargetFollows = false;
for (unsigned i = 0; i < jumpCnt - 1; ++i)
{
assert(currentBlock != nullptr);
// Remove the switch from the predecessor list of this case target's block.
// We'll add the proper new predecessor edge later.
flowList* oldEdge = comp->fgRemoveRefPred(jumpTab[i], afterDefaultCondBlock);
if (jumpTab[i] == followingBB)
{
// This case label follows the switch; let it fall through.
fAnyTargetFollows = true;
continue;
}
// We need a block to put in the new compare and/or branch.
// If we haven't used the afterDefaultCondBlock yet, then use that.
if (fUsedAfterDefaultCondBlock)
{
BasicBlock* newBlock = comp->fgNewBBafter(BBJ_NONE, currentBlock, true);
comp->fgAddRefPred(newBlock, currentBlock); // The fall-through predecessor.
currentBlock = newBlock;
currentBBRange = &LIR::AsRange(currentBlock);
}
else
{
assert(currentBlock == afterDefaultCondBlock);
fUsedAfterDefaultCondBlock = true;
}
// We're going to have a branch, either a conditional or unconditional,
// to the target. Set the target.
currentBlock->bbJumpDest = jumpTab[i];
// Wire up the predecessor list for the "branch" case.
comp->fgAddRefPred(jumpTab[i], currentBlock, oldEdge);
if (!fAnyTargetFollows && (i == jumpCnt - 2))
{
// We're processing the last one, and there is no fall through from any case
// to the following block, so we can use an unconditional branch to the final
// case: there is no need to compare against the case index, since it's
// guaranteed to be taken (since the default case was handled first, above).
currentBlock->bbJumpKind = BBJ_ALWAYS;
}
else
{
// Otherwise, it's a conditional branch. Set the branch kind, then add the
// condition statement.
currentBlock->bbJumpKind = BBJ_COND;
// Now, build the conditional statement for the current case that is
// being evaluated:
// GT_JTRUE
// |__ GT_COND
// |____GT_EQ
// |____ (switchIndex) (The temp variable)
// |____ (ICon) (The actual case constant)
GenTree* gtCaseCond = comp->gtNewOperNode(GT_EQ, TYP_INT, comp->gtNewLclvNode(tempLclNum, tempLclType),
comp->gtNewIconNode(i, tempLclType));
GenTree* gtCaseBranch = comp->gtNewOperNode(GT_JTRUE, TYP_VOID, gtCaseCond);
LIR::Range caseRange = LIR::SeqTree(comp, gtCaseBranch);
currentBBRange->InsertAtEnd(std::move(caseRange));
}
}
if (fAnyTargetFollows)
{
// There is a fall-through to the following block. In the loop
// above, we deleted all the predecessor edges from the switch.
// In this case, we need to add one back.
comp->fgAddRefPred(currentBlock->bbNext, currentBlock);
}
if (!fUsedAfterDefaultCondBlock)
{
// All the cases were fall-through! We don't need this block.
// Convert it from BBJ_SWITCH to BBJ_NONE and unset the BBF_DONT_REMOVE flag
// so fgRemoveBlock() doesn't complain.
JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum);
assert(currentBlock == afterDefaultCondBlock);
assert(currentBlock->bbJumpKind == BBJ_SWITCH);
currentBlock->bbJumpKind = BBJ_NONE;
currentBlock->bbFlags &= ~BBF_DONT_REMOVE;
comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block.
}
}
else
{
// At this point the default case has already been handled and we need to generate a jump
// table based switch or a bit test based switch at the end of afterDefaultCondBlock. Both
// switch variants need the switch value so create the necessary LclVar node here.
GenTree* switchValue = comp->gtNewLclvNode(tempLclNum, tempLclType);
LIR::Range& switchBlockRange = LIR::AsRange(afterDefaultCondBlock);
switchBlockRange.InsertAtEnd(switchValue);
// Try generating a bit test based switch first;
// if that's not possible, a jump table based switch will be generated.
if (!TryLowerSwitchToBitTest(jumpTab, jumpCnt, targetCnt, afterDefaultCondBlock, switchValue))
{
JITDUMP("Lowering switch " FMT_BB ": using jump table expansion\n", originalSwitchBB->bbNum);
#ifdef TARGET_64BIT
if (tempLclType != TYP_I_IMPL)
{
// SWITCH_TABLE expects the switch value (the index into the jump table) to be TYP_I_IMPL.
// Note that the switch value is unsigned so the cast should be unsigned as well.
switchValue = comp->gtNewCastNode(TYP_I_IMPL, switchValue, true, TYP_U_IMPL);
switchBlockRange.InsertAtEnd(switchValue);
}
#endif
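// Emit the jump table node (GT_JMPTABLE) and the indirect jump (GT_SWITCH_TABLE)
// that indexes into it with the (possibly widened) switch value.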
GenTree* switchTable = comp->gtNewJmpTableNode();
GenTree* switchJump = comp->gtNewOperNode(GT_SWITCH_TABLE, TYP_VOID, switchValue, switchTable);
switchBlockRange.InsertAfter(switchValue, switchTable, switchJump);
// this block no longer branches to the default block
afterDefaultCondBlock->bbJumpSwt->removeDefault();
}
comp->fgInvalidateSwitchDescMapEntry(afterDefaultCondBlock);
}
GenTree* next = node->gtNext;
// Get rid of the GT_SWITCH(temp).
switchBBRange.Remove(node->AsOp()->gtOp1);
switchBBRange.Remove(node);
return next;
}
//------------------------------------------------------------------------
// TryLowerSwitchToBitTest: Attempts to transform a jump table switch into a bit test.
//
// Arguments:
// jumpTable - The jump table
// jumpCount - The number of blocks in the jump table
// targetCount - The number of distinct blocks in the jump table
// bbSwitch - The switch block
// switchValue - A LclVar node that provides the switch value
//
// Return value:
// true if the switch has been lowered to a bit test
//
// Notes:
// If the jump table contains fewer than 32 (64 on 64 bit targets) entries and there
// are at most 2 distinct jump targets then the jump table can be converted to a word
// of bits where a 0 bit corresponds to one jump target and a 1 bit corresponds to the
// other jump target. Instead of the indirect jump a BT-JCC sequence is used to jump
// to the appropriate target:
// mov eax, 245 ; jump table converted to a "bit table"
// bt eax, ebx ; ebx is supposed to contain the switch value
// jc target1
// target0:
// ...
// target1:
// Such code is both shorter and faster (in part due to the removal of a memory load)
// than the traditional jump table based code. And of course, it also avoids the need
// to emit the jump table itself that can reach up to 256 bytes (for 64 entries).
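//
// For example (illustrative): a jump table whose non-default entries are
// { B1, B2, B1, B1 } yields bbCase1 = B1, bbCase0 = B2 and the bit table
// 0b1101, since bit i is set exactly when jumpTable[i] == bbCase1.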
//
bool Lowering::TryLowerSwitchToBitTest(
BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue)
{
#ifndef TARGET_XARCH
// Other architectures may use this if they substitute GT_BT with equivalent code.
return false;
#else
assert(jumpCount >= 2);
assert(targetCount >= 2);
assert(bbSwitch->bbJumpKind == BBJ_SWITCH);
assert(switchValue->OperIs(GT_LCL_VAR));
//
// Quick check to see if it's worth going through the jump table. The bit test switch supports
// up to 2 targets but targetCount also includes the default block so we need to allow 3 targets.
// We'll ensure that there are only 2 targets when building the bit table.
//
if (targetCount > 3)
{
return false;
}
//
// The number of bits in the bit table is the same as the number of jump table entries. But the
// jump table also includes the default target (at the end) so we need to ignore it. The default
// has already been handled by a JTRUE(GT(switchValue, jumpCount - 2)) that LowerSwitch generates.
//
const unsigned bitCount = jumpCount - 1;
if (bitCount > (genTypeSize(TYP_I_IMPL) * 8))
{
return false;
}
//
// Build a bit table where a bit set to 0 corresponds to bbCase0 and a bit set to 1 corresponds to
// bbCase1. Simply use the first block in the jump table as bbCase1, later we can invert the bit
// table and/or swap the blocks if it's beneficial.
//
BasicBlock* bbCase0 = nullptr;
BasicBlock* bbCase1 = jumpTable[0];
size_t bitTable = 1;
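// Bit 0 is pre-set because jumpTable[0] is bbCase1 by construction; the loop below
// sets a bit for every other entry that also jumps to bbCase1.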
for (unsigned bitIndex = 1; bitIndex < bitCount; bitIndex++)
{
if (jumpTable[bitIndex] == bbCase1)
{
bitTable |= (size_t(1) << bitIndex);
}
else if (bbCase0 == nullptr)
{
bbCase0 = jumpTable[bitIndex];
}
else if (jumpTable[bitIndex] != bbCase0)
{
// If it's neither bbCase0 nor bbCase1 then it means we have 3 targets. There can't be more
// than 3 because of the check at the start of the function.
assert(targetCount == 3);
return false;
}
}
//
// One of the case blocks has to follow the switch block. This requirement could be avoided
// by adding a BBJ_ALWAYS block after the switch block but doing that sometimes negatively
// impacts register allocation.
//
if ((bbSwitch->bbNext != bbCase0) && (bbSwitch->bbNext != bbCase1))
{
return false;
}
#ifdef TARGET_64BIT
//
// See if we can avoid an 8 byte immediate on 64 bit targets. If all upper 32 bits are 1
// then inverting the bit table will make them 0 so that the table now fits in 32 bits.
// Note that this does not change the number of bits in the bit table, it just takes
// advantage of the fact that loading a 32 bit immediate into a 64 bit register zero
// extends the immediate value to 64 bit.
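//
// For example (illustrative): bitTable = 0xFFFFFFFFFFFFFFF5 becomes 0x0A after the
// inversion, and swapping bbCase0/bbCase1 keeps the branch sense correct.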
//
if (~bitTable <= UINT32_MAX)
{
bitTable = ~bitTable;
std::swap(bbCase0, bbCase1);
}
#endif
//
// Rewire the blocks as needed and figure out the condition to use for JCC.
//
GenCondition bbSwitchCondition;
bbSwitch->bbJumpKind = BBJ_COND;
comp->fgRemoveAllRefPreds(bbCase1, bbSwitch);
comp->fgRemoveAllRefPreds(bbCase0, bbSwitch);
if (bbSwitch->bbNext == bbCase0)
{
// GenCondition::C generates JC so we jump to bbCase1 when the bit is set
bbSwitchCondition = GenCondition::C;
bbSwitch->bbJumpDest = bbCase1;
comp->fgAddRefPred(bbCase0, bbSwitch);
comp->fgAddRefPred(bbCase1, bbSwitch);
}
else
{
assert(bbSwitch->bbNext == bbCase1);
// GenCondition::NC generates JNC so we jump to bbCase0 when the bit is not set
bbSwitchCondition = GenCondition::NC;
bbSwitch->bbJumpDest = bbCase0;
comp->fgAddRefPred(bbCase0, bbSwitch);
comp->fgAddRefPred(bbCase1, bbSwitch);
}
//
// Append BT(bitTable, switchValue) and JCC(condition) to the switch block.
//
var_types bitTableType = (bitCount <= (genTypeSize(TYP_INT) * 8)) ? TYP_INT : TYP_LONG;
GenTree* bitTableIcon = comp->gtNewIconNode(bitTable, bitTableType);
GenTree* bitTest = comp->gtNewOperNode(GT_BT, TYP_VOID, bitTableIcon, switchValue);
bitTest->gtFlags |= GTF_SET_FLAGS;
GenTreeCC* jcc = new (comp, GT_JCC) GenTreeCC(GT_JCC, bbSwitchCondition);
jcc->gtFlags |= GTF_USE_FLAGS;
LIR::AsRange(bbSwitch).InsertAfter(switchValue, bitTableIcon, bitTest, jcc);
return true;
#endif // TARGET_XARCH
}
// NOTE: this method deliberately does not update the call arg table. It must only
// be used by NewPutArg and LowerArg; these functions are responsible for updating
// the call arg table as necessary.
void Lowering::ReplaceArgWithPutArgOrBitcast(GenTree** argSlot, GenTree* putArgOrBitcast)
{
assert(argSlot != nullptr);
assert(*argSlot != nullptr);
assert(putArgOrBitcast->OperIsPutArg() || putArgOrBitcast->OperIs(GT_BITCAST));
GenTree* arg = *argSlot;
// Replace the argument with the putarg/copy
*argSlot = putArgOrBitcast;
putArgOrBitcast->AsOp()->gtOp1 = arg;
// Insert the putarg/copy into the block
BlockRange().InsertAfter(arg, putArgOrBitcast);
}
//------------------------------------------------------------------------
// NewPutArg: rewrites the tree to put an arg in a register or on the stack.
//
// Arguments:
// call - the call whose arg is being rewritten.
// arg - the arg being rewritten.
// info - the fgArgTabEntry information for the argument.
// type - the type of the argument.
//
// Return Value:
// The new tree that was created to put the arg in the right place
// or the incoming arg if the arg tree was not rewritten.
//
// Assumptions:
// call, arg, and info must be non-null.
//
// Notes:
// For System V systems with native struct passing (i.e. UNIX_AMD64_ABI defined)
// this method allocates a single GT_PUTARG_REG for one-eightbyte structs and a GT_FIELD_LIST of two
// GT_PUTARG_REGs for two-eightbyte structs.
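// For example (illustrative): under the SysV AMD64 ABI a 16-byte struct classified as
// INTEGER+SSE becomes a GT_FIELD_LIST with one GT_PUTARG_REG targeting an integer register
// and one targeting an XMM register.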
//
// For STK passed structs the method generates GT_PUTARG_STK tree. For System V systems with native struct passing
// (i.e. UNIX_AMD64_ABI defined) this method also sets the GC pointers count and the pointers
// layout object, so the codegen of the GT_PUTARG_STK could use this for optimizing copying to the stack by value.
// (using block copy primitives for non GC pointers and a single TARGET_POINTER_SIZE copy with recording GC info.)
//
GenTree* Lowering::NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type)
{
assert(call != nullptr);
assert(arg != nullptr);
assert(info != nullptr);
GenTree* putArg = nullptr;
bool isOnStack = (info->GetRegNum() == REG_STK);
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
// Mark the arg as contained when we pass a struct.
// GT_FIELD_LIST is always marked contained when it is generated.
if (type == TYP_STRUCT)
{
arg->SetContained();
if ((arg->OperGet() == GT_OBJ) && (arg->AsObj()->Addr()->OperGet() == GT_LCL_VAR_ADDR))
{
MakeSrcContained(arg, arg->AsObj()->Addr());
}
}
#endif
#if FEATURE_ARG_SPLIT
// A struct can be split between register(s) and the stack on ARM.
if (compFeatureArgSplit() && info->IsSplit())
{
assert(arg->OperGet() == GT_OBJ || arg->OperGet() == GT_FIELD_LIST);
// TODO: Need to check correctness for FastTailCall
if (call->IsFastTailCall())
{
#ifdef TARGET_ARM
NYI_ARM("lower: struct argument by fast tail call");
#endif // TARGET_ARM
}
const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE;
DEBUG_ARG_SLOTS_ASSERT(slotNumber == info->slotNum);
const bool putInIncomingArgArea = call->IsFastTailCall();
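// Create a PUTARG_SPLIT node describing both the register portion and the stack
// portion of the argument.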
putArg = new (comp, GT_PUTARG_SPLIT)
GenTreePutArgSplit(arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(),
#endif
info->numRegs, call, putInIncomingArgArea);
// If the struct argument has been morphed into GT_FIELD_LIST node(s),
// the GC info is known from the type of each GT_FIELD_LIST field,
// so we skip setting GC pointer info here.
//
GenTreePutArgSplit* argSplit = putArg->AsPutArgSplit();
for (unsigned regIndex = 0; regIndex < info->numRegs; regIndex++)
{
argSplit->SetRegNumByIdx(info->GetRegNum(regIndex), regIndex);
}
if (arg->OperGet() == GT_OBJ)
{
ClassLayout* layout = arg->AsObj()->GetLayout();
// Set type of registers
for (unsigned index = 0; index < info->numRegs; index++)
{
argSplit->m_regType[index] = layout->GetGCPtrType(index);
}
}
else
{
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
{
if (regIndex >= info->numRegs)
{
break;
}
var_types regType = use.GetNode()->TypeGet();
// Account for the possibility that float fields may be passed in integer registers.
if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(regIndex)))
{
regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
}
argSplit->m_regType[regIndex] = regType;
regIndex++;
}
// Clear the register assignment on the fieldList node, as these are contained.
arg->SetRegNum(REG_NA);
}
}
else
#endif // FEATURE_ARG_SPLIT
{
if (!isOnStack)
{
#if FEATURE_MULTIREG_ARGS
if ((info->numRegs > 1) && (arg->OperGet() == GT_FIELD_LIST))
{
unsigned int regIndex = 0;
for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
{
regNumber argReg = info->GetRegNum(regIndex);
GenTree* curOp = use.GetNode();
var_types curTyp = curOp->TypeGet();
// Create a new GT_PUTARG_REG node with op1
GenTree* newOper = comp->gtNewPutArgReg(curTyp, curOp, argReg);
// Splice in the new GT_PUTARG_REG node in the GT_FIELD_LIST
ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), newOper);
regIndex++;
}
// Just return arg. The GT_FIELD_LIST is not replaced.
// Nothing more to do.
return arg;
}
else
#endif // FEATURE_MULTIREG_ARGS
{
putArg = comp->gtNewPutArgReg(type, arg, info->GetRegNum());
}
}
else
{
// Mark this one as a tail call arg if it is a fast tail call.
// This provides the info to put this argument in the incoming arg area slot
// instead of the outgoing arg area slot.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
// Make sure state is correct. The PUTARG_STK has TYP_VOID, as it doesn't produce
// a result. So the type of its operand must be the correct type to push on the stack.
// For a FIELD_LIST, this will be the type of the field (not the type of the arg),
// but otherwise it is generally the type of the operand.
info->checkIsStruct();
#endif
if ((arg->OperGet() != GT_FIELD_LIST))
{
#if defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
if (type == TYP_SIMD12)
{
#if !defined(TARGET_64BIT)
assert(info->GetByteSize() == 12);
#else // TARGET_64BIT
if (compMacOsArm64Abi())
{
assert(info->GetByteSize() == 12);
}
else
{
assert(info->GetByteSize() == 16);
}
#endif // TARGET_64BIT
}
else
#endif // defined(FEATURE_SIMD) && defined(FEATURE_PUT_STRUCT_ARG_STK)
{
assert(genActualType(arg->TypeGet()) == type);
}
}
const unsigned slotNumber = info->GetByteOffset() / TARGET_POINTER_SIZE;
const bool putInIncomingArgArea = call->IsFastTailCall();
putArg = new (comp, GT_PUTARG_STK)
GenTreePutArgStk(GT_PUTARG_STK, TYP_VOID, arg, info->GetByteOffset(),
#if defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(), slotNumber, info->GetStackSlotsNumber(),
#elif defined(DEBUG_ARG_SLOTS) && !defined(FEATURE_PUT_STRUCT_ARG_STK)
slotNumber,
#elif !defined(DEBUG_ARG_SLOTS) && defined(FEATURE_PUT_STRUCT_ARG_STK)
info->GetStackByteSize(),
#endif
call, putInIncomingArgArea);
#ifdef FEATURE_PUT_STRUCT_ARG_STK
// If the ArgTabEntry indicates that this arg is a struct,
// get and store the number of slots that are references.
// This is later used by the PUT_ARG_STK codegen for structs to decide
// whether, and how many, single eight-byte copies need to be done
// (only for reference slots), so that gcinfo is emitted correctly.
// For non-reference slots faster/smaller instructions are used -
// pair copying using XMM registers or rep movs instructions.
if (info->isStruct)
{
// We use GT_OBJ only for non-lclVar, non-SIMD, non-FIELD_LIST struct arguments.
if (arg->OperIsLocal())
{
// This must have a type with a known size (SIMD or has been morphed to a primitive type).
assert(arg->TypeGet() != TYP_STRUCT);
}
else if (arg->OperIs(GT_OBJ))
{
assert(!varTypeIsSIMD(arg));
#ifdef TARGET_X86
// On x86, the VM lies about the type of a struct containing a pointer-sized
// integer field by returning the type of its field as the type of the struct.
// Such a struct can be passed in a register depending on its position in the
// parameter list. The VM does this unwrapping only one level deep and therefore
// a type like Struct Foo { Struct Bar { int f } } always needs to be
// passed on the stack. Also, the VM doesn't lie about the type of such a struct
// when it is a field of another struct; that is, the VM doesn't lie about
// the type of Foo.Bar.
//
// We now support the promotion of fields that are of type struct.
// However, we only support a limited case where the struct field has a
// single field and that single field must be a scalar type. Say the Foo.Bar
// field is being passed as a parameter to a call. Since it is a TYP_STRUCT,
// per the x86 ABI it should always be passed on the stack. Therefore the GenTree
// node under a PUTARG_STK could be GT_OBJ(GT_LCL_VAR_ADDR(v1)), where
// local v1 could be a promoted field standing for Foo.Bar. Note that
// the type of v1 will be the type of the field Foo.Bar.f when Foo is
// promoted. That is, v1 will be a scalar type. In this case we need to
// pass v1 on the stack instead of in a register.
//
// TODO-PERF: replace GT_OBJ(GT_LCL_VAR_ADDR(v1)) with v1 if v1 is
// a scalar type and the width of GT_OBJ matches the type size of v1.
// Note that this cannot be done until call node arguments are morphed,
// because we should not lose the fact that the type of the argument is
// a struct, so that the arg gets correctly marked to be passed on the stack.
GenTree* objOp1 = arg->gtGetOp1();
if (objOp1->OperGet() == GT_LCL_VAR_ADDR)
{
unsigned lclNum = objOp1->AsLclVarCommon()->GetLclNum();
if (comp->lvaTable[lclNum].lvType != TYP_STRUCT)
{
comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::VMNeedsStackAddr));
}
}
#endif // TARGET_X86
}
else if (!arg->OperIs(GT_FIELD_LIST))
{
#ifdef TARGET_ARM
assert((info->GetStackSlotsNumber() == 1) ||
((arg->TypeGet() == TYP_DOUBLE) && (info->GetStackSlotsNumber() == 2)));
#else
assert(varTypeIsSIMD(arg) || (info->GetStackSlotsNumber() == 1));
#endif
}
}
#endif // FEATURE_PUT_STRUCT_ARG_STK
}
}
JITDUMP("new node is : ");
DISPNODE(putArg);
JITDUMP("\n");
if (arg->gtFlags & GTF_LATE_ARG)
{
putArg->gtFlags |= GTF_LATE_ARG;
}
return putArg;
}
//------------------------------------------------------------------------
// LowerArg: Lower one argument of a call. This entails splicing a "putarg" node between
// the argument evaluation and the call. This is the point at which the source is
// consumed and the value transitions from control of the register allocator to the calling
// convention.
//
// Arguments:
// call - The call node
// ppArg - Pointer to the call argument pointer. We might replace the call argument by
// changing *ppArg.
//
// Return Value:
// None.
//
void Lowering::LowerArg(GenTreeCall* call, GenTree** ppArg)
{
GenTree* arg = *ppArg;
JITDUMP("lowering arg : ");
DISPNODE(arg);
// No assignments should remain by Lowering.
assert(!arg->OperIs(GT_ASG));
assert(!arg->OperIsPutArgStk());
// Assignments/stores at this level are not really placing an argument.
// They are setting up temporary locals that will later be placed into
// outgoing regs or stack.
// Note that atomic ops may be stores and still produce a value.
if (!arg->IsValue())
{
assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
arg->OperIsCopyBlkOp());
return;
}
fgArgTabEntry* info = comp->gtArgEntryByNode(call, arg);
assert(info->GetNode() == arg);
var_types type = arg->TypeGet();
if (varTypeIsSmall(type))
{
// Normalize 'type'; it represents the item that we will be storing in the outgoing arg area.
type = TYP_INT;
}
#if defined(FEATURE_SIMD)
#if defined(TARGET_X86)
// Non-param TYP_SIMD12 local var nodes are massaged in Lower to TYP_SIMD16 to match their
// allocated size (see lvSize()). However, when passing the variables as arguments, and
// storing the variables to the outgoing argument area on the stack, we must use their
// actual TYP_SIMD12 type, so exactly 12 bytes is allocated and written.
if (type == TYP_SIMD16)
{
if ((arg->OperGet() == GT_LCL_VAR) || (arg->OperGet() == GT_STORE_LCL_VAR))
{
const LclVarDsc* varDsc = comp->lvaGetDesc(arg->AsLclVarCommon());
type = varDsc->lvType;
}
else if (arg->OperIs(GT_SIMD, GT_HWINTRINSIC))
{
GenTreeJitIntrinsic* jitIntrinsic = reinterpret_cast<GenTreeJitIntrinsic*>(arg);
// For HWIntrinsic, there are some intrinsics like ExtractVector128 which have
// a gtType of TYP_SIMD16 but a SimdSize of 32, so we need to include that in
// the assert below.
assert((jitIntrinsic->GetSimdSize() == 12) || (jitIntrinsic->GetSimdSize() == 16) ||
(jitIntrinsic->GetSimdSize() == 32));
if (jitIntrinsic->GetSimdSize() == 12)
{
type = TYP_SIMD12;
}
}
}
#elif defined(TARGET_AMD64)
// TYP_SIMD8 parameters that are passed as longs
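// This happens when the ABI assigns an 8-byte vector struct to an integer register
// (for example, under the Windows x64 calling convention), so bitcast the value to
// TYP_LONG and let it flow through the integer register path.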
if (type == TYP_SIMD8 && genIsValidIntReg(info->GetRegNum()))
{
GenTree* bitcast = comp->gtNewBitCastNode(TYP_LONG, arg);
BlockRange().InsertAfter(arg, bitcast);
*ppArg = arg = bitcast;
assert(info->GetNode() == arg);
type = TYP_LONG;
}
#endif // defined(TARGET_X86) / defined(TARGET_AMD64)
#endif // defined(FEATURE_SIMD)
// If we hit this we are probably double-lowering.
assert(!arg->OperIsPutArg());
#if !defined(TARGET_64BIT)
if (varTypeIsLong(type))
{
noway_assert(arg->OperIs(GT_LONG));
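// Decompose the GT_LONG into a two-field GT_FIELD_LIST: the lo half at offset 0 and
// the hi half at offset 4, each as TYP_INT.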
GenTreeFieldList* fieldList = new (comp, GT_FIELD_LIST) GenTreeFieldList();
fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp1(), 0, TYP_INT);
fieldList->AddFieldLIR(comp, arg->AsOp()->gtGetOp2(), 4, TYP_INT);
GenTree* newArg = NewPutArg(call, fieldList, info, type);
if (info->GetRegNum() != REG_STK)
{
assert(info->numRegs == 2);
// In the register argument case, NewPutArg replaces the original field list args with new
// GT_PUTARG_REG nodes, inserts them in linear order and returns the field list. So the
// only thing left to do is to insert the field list itself in linear order.
assert(newArg == fieldList);
BlockRange().InsertBefore(arg, newArg);
}
else
{
// For longs, we will replace the GT_LONG with a GT_FIELD_LIST, and put that under a PUTARG_STK.
// Although the hi argument needs to be pushed first, that will be handled by the general case,
// in which the fields will be reversed.
assert(info->numSlots == 2);
newArg->SetRegNum(REG_STK);
BlockRange().InsertBefore(arg, fieldList, newArg);
}
*ppArg = newArg;
assert(info->GetNode() == newArg);
BlockRange().Remove(arg);
}
else
#endif // !defined(TARGET_64BIT)
{
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
if (call->IsVarargs() || comp->opts.compUseSoftFP)
{
// For a vararg call, or under soft FP (armel), reg args should be all integer.
// Insert copies (bitcasts) as needed to move float values to integer registers.
GenTree* newNode = LowerFloatArg(ppArg, info);
if (newNode != nullptr)
{
type = newNode->TypeGet();
}
}
#endif // TARGET_ARMARCH || TARGET_LOONGARCH64
GenTree* putArg = NewPutArg(call, arg, info, type);
// In the case of a register-passable struct (in one or two registers)
// NewPutArg returns a new node (GT_PUTARG_REG or a GT_FIELD_LIST with two GT_PUTARG_REGs).
// If an extra node is returned, splice it in the right place in the tree.
if (arg != putArg)
{
ReplaceArgWithPutArgOrBitcast(ppArg, putArg);
}
}
}
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
//------------------------------------------------------------------------
// LowerFloatArg: Lower float call arguments on the arm/LoongArch64 platform.
//
// Arguments:
// arg - The arg node
// info - call argument info
//
// Return Value:
// Return nullptr if no transformation was done;
// return arg if there was an in-place transformation;
// return a new tree if the root was changed.
//
// Notes:
// This must handle scalar float arguments as well as GT_FIELD_LISTs
// with floating point fields.
//
GenTree* Lowering::LowerFloatArg(GenTree** pArg, fgArgTabEntry* info)
{
GenTree* arg = *pArg;
if (info->GetRegNum() != REG_STK)
{
if (arg->OperIs(GT_FIELD_LIST))
{
// Transform fields that are passed as registers in place.
regNumber currRegNumber = info->GetRegNum();
unsigned regIndex = 0;
for (GenTreeFieldList::Use& use : arg->AsFieldList()->Uses())
{
if (regIndex >= info->numRegs)
{
break;
}
GenTree* node = use.GetNode();
if (varTypeIsFloating(node))
{
GenTree* intNode = LowerFloatArgReg(node, currRegNumber);
assert(intNode != nullptr);
ReplaceArgWithPutArgOrBitcast(&use.NodeRef(), intNode);
}
if (node->TypeGet() == TYP_DOUBLE)
{
currRegNumber = REG_NEXT(REG_NEXT(currRegNumber));
regIndex += 2;
}
else
{
currRegNumber = REG_NEXT(currRegNumber);
regIndex += 1;
}
}
// List fields were replaced in place.
return arg;
}
else if (varTypeIsFloating(arg))
{
GenTree* intNode = LowerFloatArgReg(arg, info->GetRegNum());
assert(intNode != nullptr);
ReplaceArgWithPutArgOrBitcast(pArg, intNode);
return *pArg;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// LowerFloatArgReg: Lower the float call argument node that is passed via register.
//
// Arguments:
// arg - The arg node
// regNum - register number
//
// Return Value:
// Return a new bitcast node that moves the float to an int register.
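//
// For example (illustrative): on arm32 softFP a TYP_DOUBLE argument assigned to r0
// becomes BITCAST<long>(arg) occupying the register pair r0/r1.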
//
GenTree* Lowering::LowerFloatArgReg(GenTree* arg, regNumber regNum)
{
var_types floatType = arg->TypeGet();
assert(varTypeIsFloating(floatType));
var_types intType = (floatType == TYP_DOUBLE) ? TYP_LONG : TYP_INT;
GenTree* intArg = comp->gtNewBitCastNode(intType, arg);
intArg->SetRegNum(regNum);
#ifdef TARGET_ARM
if (floatType == TYP_DOUBLE)
{
// A special case when we introduce TYP_LONG
// during lowering for arm32 softFP to pass double
// in int registers.
assert(comp->opts.compUseSoftFP);
regNumber nextReg = REG_NEXT(regNum);
intArg->AsMultiRegOp()->gtOtherReg = nextReg;
}
#endif
return intArg;
}
#endif
// do lowering steps for each arg of a call
void Lowering::LowerArgsForCall(GenTreeCall* call)
{
JITDUMP("objp:\n======\n");
if (call->gtCallThisArg != nullptr)
{
LowerArg(call, &call->gtCallThisArg->NodeRef());
}
JITDUMP("\nargs:\n======\n");
for (GenTreeCall::Use& use : call->Args())
{
LowerArg(call, &use.NodeRef());
}
JITDUMP("\nlate:\n======\n");
for (GenTreeCall::Use& use : call->LateArgs())
{
LowerArg(call, &use.NodeRef());
}
}
// helper that creates a node representing a relocatable physical address computation
GenTree* Lowering::AddrGen(ssize_t addr)
{
// this should end up in codegen as : instGen_Set_Reg_To_Imm(EA_HANDLE_CNS_RELOC, reg, addr)
GenTree* result = comp->gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
return result;
}
// variant that takes a void*
GenTree* Lowering::AddrGen(void* addr)
{
return AddrGen((ssize_t)addr);
}
// do lowering steps for a call
// this includes:
// - adding the placement nodes (either stack or register variety) for arguments
// - lowering the expression that calculates the target address
// - adding nodes for other operations that occur after the call sequence starts and before
// control transfer occurs (profiling and tail call helpers, pinvoke incantations)
//
void Lowering::LowerCall(GenTree* node)
{
GenTreeCall* call = node->AsCall();
JITDUMP("lowering call (before):\n");
DISPTREERANGE(BlockRange(), call);
JITDUMP("\n");
call->ClearOtherRegs();
LowerArgsForCall(call);
// note that everything generated from this point might run AFTER the outgoing args are placed
GenTree* controlExpr = nullptr;
bool callWasExpandedEarly = false;
// for x86, this is where we record ESP for checking later to make sure stack is balanced
// Check for Delegate.Invoke(). If so, we inline it. We get the
// target-object and target-function from the delegate-object, and do
// an indirect call.
if (call->IsDelegateInvoke())
{
controlExpr = LowerDelegateInvoke(call);
}
else
{
// Virtual and interface calls
switch (call->gtFlags & GTF_CALL_VIRT_KIND_MASK)
{
case GTF_CALL_VIRT_STUB:
controlExpr = LowerVirtualStubCall(call);
break;
case GTF_CALL_VIRT_VTABLE:
assert(call->IsVirtualVtable());
if (!call->IsExpandedEarly())
{
assert(call->gtControlExpr == nullptr);
controlExpr = LowerVirtualVtableCall(call);
}
else
{
callWasExpandedEarly = true;
controlExpr = call->gtControlExpr;
}
break;
case GTF_CALL_NONVIRT:
if (call->IsUnmanaged())
{
controlExpr = LowerNonvirtPinvokeCall(call);
}
else if (call->gtCallType == CT_INDIRECT)
{
controlExpr = LowerIndirectNonvirtCall(call);
}
else
{
controlExpr = LowerDirectCall(call);
}
break;
default:
noway_assert(!"strange call type");
break;
}
}
// Indirect calls should always go through GenTreeCall::gtCallAddr and
// should never have a control expression as well.
assert((call->gtCallType != CT_INDIRECT) || (controlExpr == nullptr));
if (call->IsTailCallViaJitHelper())
{
// Either controlExpr or gtCallAddr must contain real call target.
if (controlExpr == nullptr)
{
assert(call->gtCallType == CT_INDIRECT);
assert(call->gtCallAddr != nullptr);
controlExpr = call->gtCallAddr;
}
controlExpr = LowerTailCallViaJitHelper(call, controlExpr);
}
// Check if we need to thread a newly created controlExpr into the LIR
//
if ((controlExpr != nullptr) && !callWasExpandedEarly)
{
LIR::Range controlExprRange = LIR::SeqTree(comp, controlExpr);
JITDUMP("results of lowering call:\n");
DISPRANGE(controlExprRange);
ContainCheckRange(controlExprRange);
BlockRange().InsertBefore(call, std::move(controlExprRange));
call->gtControlExpr = controlExpr;
}
if (comp->opts.IsCFGEnabled())
{
LowerCFGCall(call);
}
if (call->IsFastTailCall())
{
// Lowering a fast tail call can introduce new temps to set up args correctly for the callee.
// This involves patching LCL_VAR and LCL_VAR_ADDR nodes holding caller stack args
// and replacing them with a new temp. The control expr can also contain nodes that need
// to be patched.
// Therefore fast tail call lowering must be done after controlExpr is inserted into LIR.
// One side effect is that the order of the PInvoke method epilog (PME) and the control
// expression is flipped, since LowerFastTailCall calls InsertPInvokeMethodEpilog.
LowerFastTailCall(call);
}
if (varTypeIsStruct(call))
{
LowerCallStruct(call);
}
ContainCheckCallOperands(call);
JITDUMP("lowering call (after):\n");
DISPTREERANGE(BlockRange(), call);
JITDUMP("\n");
}
// Inserts profiler hook, GT_PROF_HOOK for a tail call node.
//
// AMD64:
// We need to insert this after all nested calls, but before all the arguments to this call have been set up.
// To do this, we look for the first GT_PUTARG_STK or GT_PUTARG_REG, and insert the hook immediately before
// that. If there are no args, then it should be inserted before the call node.
//
// For example:
// * stmtExpr void (top level) (IL 0x000...0x010)
// arg0 SETUP | /--* argPlace ref REG NA $c5
// this in rcx | | /--* argPlace ref REG NA $c1
// | | | /--* call ref System.Globalization.CultureInfo.get_InvariantCulture $c2
// arg1 SETUP | | +--* st.lclVar ref V02 tmp1 REG NA $c2
// | | | /--* lclVar ref V02 tmp1 u : 2 (last use) REG NA $c2
// arg1 in rdx | | +--* putarg_reg ref REG NA
// | | | /--* lclVar ref V00 arg0 u : 2 (last use) REG NA $80
// this in rcx | | +--* putarg_reg ref REG NA
// | | /--* call nullcheck ref System.String.ToLower $c5
// | | { * stmtExpr void (embedded)(IL 0x000... ? ? ? )
// | | { \--* prof_hook void REG NA
// arg0 in rcx | +--* putarg_reg ref REG NA
// control expr | +--* const(h) long 0x7ffe8e910e98 ftn REG NA
// \--* call void System.Runtime.Remoting.Identity.RemoveAppNameOrAppGuidIfNecessary $VN.Void
//
// In this case, the GT_PUTARG_REG src is a nested call. We need to put the instructions after that call
// (as shown). We assume that of all the GT_PUTARG_*, only the first one can have a nested call.
//
// X86:
// Insert the profiler hook immediately before the call. The profiler hook will preserve
// all argument registers (ECX, EDX), but nothing else.
//
// Params:
// callNode - tail call node
// insertionPoint - if non-null, insert the profiler hook before this point.
// If null, insert the profiler hook before args are setup
// but after all arg side effects are computed.
//
void Lowering::InsertProfTailCallHook(GenTreeCall* call, GenTree* insertionPoint)
{
assert(call->IsTailCall());
assert(comp->compIsProfilerHookNeeded());
#if defined(TARGET_X86)
if (insertionPoint == nullptr)
{
insertionPoint = call;
}
#else // !defined(TARGET_X86)
if (insertionPoint == nullptr)
{
for (GenTreeCall::Use& use : call->Args())
{
assert(!use.GetNode()->OperIs(GT_PUTARG_REG)); // We don't expect to see these in gtCallArgs
if (use.GetNode()->OperIs(GT_PUTARG_STK))
{
// found it
insertionPoint = use.GetNode();
break;
}
}
if (insertionPoint == nullptr)
{
for (GenTreeCall::Use& use : call->LateArgs())
{
if (use.GetNode()->OperIs(GT_PUTARG_REG, GT_PUTARG_STK))
{
// found it
insertionPoint = use.GetNode();
break;
}
}
// If there are no args, insert before the call node
if (insertionPoint == nullptr)
{
insertionPoint = call;
}
}
}
#endif // !defined(TARGET_X86)
assert(insertionPoint != nullptr);
GenTree* profHookNode = new (comp, GT_PROF_HOOK) GenTree(GT_PROF_HOOK, TYP_VOID);
BlockRange().InsertBefore(insertionPoint, profHookNode);
}
//------------------------------------------------------------------------
// LowerFastTailCall: Lower a call node dispatched as a fast tailcall (epilog +
// jmp).
//
// Arguments:
// call - the call node that is being dispatched as a fast tailcall.
//
// Assumptions:
// call must be non-null.
//
// Notes:
// For fast tail calls it is necessary to set up stack args in the incoming
// arg stack space area. When the args being passed also come from this area, we may
// run into problems because we may end up overwriting the stack slot before
// using it. For example, for foo(a, b) { return bar(b, a); }, if a and b
// are on incoming arg stack space in foo they need to be swapped in this
// area for the call to bar. This function detects this situation and
// introduces a temp when an outgoing argument would overwrite a later-used
// incoming argument.
//
// This function also handles inserting necessary profiler hooks and pinvoke
// method epilogs in case there are inlined pinvokes.
void Lowering::LowerFastTailCall(GenTreeCall* call)
{
#if FEATURE_FASTTAILCALL
// Tail call restrictions i.e. conditions under which tail prefix is ignored.
// Most of these checks are already done by importer or fgMorphTailCall().
// This serves as a double sanity check.
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
assert(!comp->opts.IsReversePInvoke()); // tail calls reverse pinvoke
assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
#ifdef TARGET_AMD64
assert(!comp->getNeedsGSSecurityCookie()); // jit64 compat: tail calls from methods that need GS check
#endif // TARGET_AMD64
// We expect to see a call that meets the following conditions
assert(call->IsFastTailCall());
// VM cannot use return address hijacking when A() and B() tail call each
// other in mutual recursion. Therefore, this block is reachable through
// a GC-safe point or the whole method is marked as fully interruptible.
//
// TODO-Cleanup:
// optReachWithoutCall() depends on the fact that loop header blocks
// will have a block number > fgLastBB. These loop headers get added
// after dominator computation and get skipped by optReachWithoutCall().
// The below condition cannot be asserted in lower because fgSimpleLowering()
// can add a new basic block for range check failure which becomes
// fgLastBB with block number > loop header block number.
// assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
// !comp->optReachWithoutCall(comp->fgFirstBB, comp->compCurBB) || comp->GetInterruptible());
// If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
// a method returns. This is the case where the caller method has both PInvokes and tail calls.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
}
// Args for a tail call are set up in the incoming arg area. The gc-ness of the args of the
// caller and the callee (which is being tail called) may not match. Therefore, everything
// from arg setup until the epilog needs to be non-interruptible by GC. This is
// achieved by inserting GT_START_NONGC before the very first GT_PUTARG_STK node
// of the call is set up. Note that once a stack arg is set up, it cannot be followed
// in execution order by nested calls that set up other args, because a nested
// call could overwrite the stack arg that was set up earlier.
ArrayStack<GenTree*> putargs(comp->getAllocator(CMK_ArrayStack));
for (GenTreeCall::Use& use : call->Args())
{
if (use.GetNode()->OperIs(GT_PUTARG_STK))
{
putargs.Push(use.GetNode());
}
}
for (GenTreeCall::Use& use : call->LateArgs())
{
if (use.GetNode()->OperIs(GT_PUTARG_STK))
{
putargs.Push(use.GetNode());
}
}
GenTree* startNonGCNode = nullptr;
if (!putargs.Empty())
{
// Get the earliest operand of the first PUTARG_STK node. We will make
// the required copies of args before this node.
bool unused;
GenTree* insertionPoint = BlockRange().GetTreeRange(putargs.Bottom(), &unused).FirstNode();
// Insert GT_START_NONGC node before we evaluate the PUTARG_STK args.
// Note that if there are no args to be setup on stack, no need to
// insert GT_START_NONGC node.
startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
BlockRange().InsertBefore(insertionPoint, startNonGCNode);
// Gc-interruptability in the following case:
// foo(a, b, c, d, e) { bar(a, b, c, d, e); }
// bar(a, b, c, d, e) { foo(a, b, c, d, e); }
//
// Since the instruction group starting from the instruction that sets up first
// stack arg to the end of the tail call is marked as non-gc interruptible,
// this will form a non-interruptible tight loop causing gc-starvation. To fix
// this we insert a GT_NO_OP as an embedded stmt before GT_START_NONGC, if the method
// has a single basic block and is not a GC-safe point. The presence of a single
// nop outside the non-gc interruptible region will prevent gc starvation.
if ((comp->fgBBcount == 1) && !(comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT))
{
assert(comp->fgFirstBB == comp->compCurBB);
GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
BlockRange().InsertBefore(startNonGCNode, noOp);
}
// Since this is a fast tailcall each PUTARG_STK will place the argument in the
// _incoming_ arg space area. This will effectively overwrite our already existing
// incoming args that live in that area. If we have later uses of those args, this
// is a problem. We introduce a defensive copy into a temp here of those args that
// potentially may cause problems.
for (int i = 0; i < putargs.Height(); i++)
{
GenTreePutArgStk* put = putargs.Bottom(i)->AsPutArgStk();
unsigned int overwrittenStart = put->getArgOffset();
unsigned int overwrittenEnd = overwrittenStart + put->GetStackByteSize();
int baseOff = -1; // Stack offset of first arg on stack
for (unsigned callerArgLclNum = 0; callerArgLclNum < comp->info.compArgsCount; callerArgLclNum++)
{
LclVarDsc* callerArgDsc = comp->lvaGetDesc(callerArgLclNum);
if (callerArgDsc->lvIsRegArg)
{
continue;
}
unsigned int argStart;
unsigned int argEnd;
#if defined(TARGET_AMD64)
if (TargetOS::IsWindows)
{
// On Windows x64, the argument position determines the stack slot uniquely, and even the
// register args take up space in the stack frame (shadow space).
argStart = callerArgLclNum * TARGET_POINTER_SIZE;
argEnd = argStart + static_cast<unsigned int>(callerArgDsc->lvArgStackSize());
}
else
#endif // TARGET_AMD64
{
assert(callerArgDsc->GetStackOffset() != BAD_STK_OFFS);
if (baseOff == -1)
{
baseOff = callerArgDsc->GetStackOffset();
}
// On all ABIs where we fast tail call, the stack args should come in order.
assert(baseOff <= callerArgDsc->GetStackOffset());
// Compute offset of this stack argument relative to the first stack arg.
// This will be its offset into the incoming arg space area.
argStart = static_cast<unsigned int>(callerArgDsc->GetStackOffset() - baseOff);
argEnd = argStart + comp->lvaLclSize(callerArgLclNum);
}
// If ranges do not overlap then this PUTARG_STK will not mess up the arg.
if ((overwrittenEnd <= argStart) || (overwrittenStart >= argEnd))
{
continue;
}
// Codegen cannot handle a partially overlapping copy. For
// example, if we have
// bar(S16 stack, S32 stack2)
// foo(S32 stack, S32 stack2) { bar(..., stack) }
// then we may end up having to move 'stack' in foo 16 bytes
// ahead. It is possible that this PUTARG_STK is the only use,
// in which case we will need to introduce a temp, so look for
// uses starting from it. Note that we assume that in-place
// copies are OK.
GenTree* lookForUsesFrom = put->gtNext;
if (overwrittenStart != argStart)
{
lookForUsesFrom = insertionPoint;
}
RehomeArgForFastTailCall(callerArgLclNum, insertionPoint, lookForUsesFrom, call);
// The above call can introduce temps and invalidate the pointer.
callerArgDsc = comp->lvaGetDesc(callerArgLclNum);
// For promoted locals we have more work to do as its fields could also have been invalidated.
if (!callerArgDsc->lvPromoted)
{
continue;
}
unsigned int fieldsFirst = callerArgDsc->lvFieldLclStart;
unsigned int fieldsEnd = fieldsFirst + callerArgDsc->lvFieldCnt;
for (unsigned int j = fieldsFirst; j < fieldsEnd; j++)
{
RehomeArgForFastTailCall(j, insertionPoint, lookForUsesFrom, call);
}
}
}
}
// Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be
// inserted before the args are setup but after the side effects of args are
// computed. That is, GT_PROF_HOOK node needs to be inserted before GT_START_NONGC
// node if one exists.
if (comp->compIsProfilerHookNeeded())
{
InsertProfTailCallHook(call, startNonGCNode);
}
#else // !FEATURE_FASTTAILCALL
// Platform does not implement fast tail call mechanism. This cannot be
// reached because we always choose to do a tailcall via helper on those
// platforms (or no tailcall at all).
unreached();
#endif
}
//
//------------------------------------------------------------------------
// RehomeArgForFastTailCall: Introduce temps for args that may be overwritten
// during fast tailcall sequence.
//
// Arguments:
// lclNum - the lcl num of the arg that will be overwritten.
// insertTempBefore - the node at which to copy the arg into a temp.
// lookForUsesStart - the node where to start scanning and replacing uses of
// the arg specified by lclNum.
// callNode - the call node that is being dispatched as a fast tailcall.
//
// Assumptions:
// all args must be non-null.
//
// Notes:
// This function scans for uses of the arg specified by lclNum starting
// from the lookForUsesStart node. If it finds any uses it introduces a temp
// for this argument and updates uses to use this instead. In the situation
// where it introduces a temp it can thus invalidate pointers to other
// locals.
//
void Lowering::RehomeArgForFastTailCall(unsigned int lclNum,
GenTree* insertTempBefore,
GenTree* lookForUsesStart,
GenTreeCall* callNode)
{
unsigned int tmpLclNum = BAD_VAR_NUM;
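// The temp is created lazily: we only grab one if we actually find a use of lclNum
// in the range being scanned.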
for (GenTree* treeNode = lookForUsesStart; treeNode != callNode; treeNode = treeNode->gtNext)
{
if (!treeNode->OperIsLocal() && !treeNode->OperIsLocalAddr())
{
continue;
}
GenTreeLclVarCommon* lcl = treeNode->AsLclVarCommon();
if (lcl->GetLclNum() != lclNum)
{
continue;
}
// Create tmp and use it in place of callerArgDsc
if (tmpLclNum == BAD_VAR_NUM)
{
tmpLclNum = comp->lvaGrabTemp(true DEBUGARG("Fast tail call lowering is creating a new local variable"));
LclVarDsc* callerArgDsc = comp->lvaGetDesc(lclNum);
var_types tmpTyp = genActualType(callerArgDsc->TypeGet());
comp->lvaTable[tmpLclNum].lvType = tmpTyp;
// TODO-CQ: I don't see why we should copy doNotEnreg.
comp->lvaTable[tmpLclNum].lvDoNotEnregister = callerArgDsc->lvDoNotEnregister;
#ifdef DEBUG
comp->lvaTable[tmpLclNum].SetDoNotEnregReason(callerArgDsc->GetDoNotEnregReason());
#endif // DEBUG
GenTree* value;
#ifdef TARGET_ARM
if (tmpTyp == TYP_LONG)
{
GenTree* loResult = comp->gtNewLclFldNode(lclNum, TYP_INT, 0);
GenTree* hiResult = comp->gtNewLclFldNode(lclNum, TYP_INT, 4);
value = new (comp, GT_LONG) GenTreeOp(GT_LONG, TYP_LONG, loResult, hiResult);
}
else
#endif // TARGET_ARM
{
value = comp->gtNewLclvNode(lclNum, tmpTyp);
}
if (tmpTyp == TYP_STRUCT)
{
comp->lvaSetStruct(tmpLclNum, comp->lvaGetStruct(lclNum), false);
}
GenTreeLclVar* storeLclVar = comp->gtNewStoreLclVar(tmpLclNum, value);
BlockRange().InsertBefore(insertTempBefore, LIR::SeqTree(comp, storeLclVar));
ContainCheckRange(value, storeLclVar);
LowerNode(storeLclVar);
}
lcl->SetLclNum(tmpLclNum);
}
}
//------------------------------------------------------------------------
// LowerTailCallViaJitHelper: lower a call via the tailcall JIT helper. Morph
// has already inserted tailcall helper special arguments. This function inserts
// actual data for some placeholders. This function is only used on x86.
//
// Lower
// tail.call(<function args>, int numberOfOldStackArgs, int dummyNumberOfNewStackArgs, int flags, void* dummyArg)
// as
// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
// callTarget)
// Note that the special arguments are on the stack, whereas the function arguments follow the normal convention.
//
// Also inserts PInvoke method epilog if required.
//
// Arguments:
// call - The call node
// callTarget - The real call target. This is used to replace the dummyArg during lowering.
//
// Return Value:
// Returns control expression tree for making a call to helper Jit_TailCall.
//
GenTree* Lowering::LowerTailCallViaJitHelper(GenTreeCall* call, GenTree* callTarget)
{
// Tail call restrictions i.e. conditions under which tail prefix is ignored.
// Most of these checks are already done by importer or fgMorphTailCall().
// This serves as a double sanity check.
assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); // tail calls from synchronized methods
assert(!call->IsUnmanaged()); // tail calls to unmanaged methods
assert(!comp->compLocallocUsed); // tail call from methods that also do localloc
// We expect to see a call that meets the following conditions
assert(call->IsTailCallViaJitHelper());
assert(callTarget != nullptr);
// The TailCall helper call never returns to the caller and is not GC interruptible.
// Therefore the block containing the tail call should be a GC safe point to avoid
// GC starvation. It is legal for the block to be unmarked iff the entry block is a
// GC safe point, as the entry block trivially dominates every reachable block.
assert((comp->compCurBB->bbFlags & BBF_GC_SAFE_POINT) || (comp->fgFirstBB->bbFlags & BBF_GC_SAFE_POINT));
// If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
// a method returns. This is the case where the caller method has both PInvokes and tail calls.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(call));
}
// Remove gtCallAddr from execution order if present.
if (call->gtCallType == CT_INDIRECT)
{
assert(call->gtCallAddr != nullptr);
bool isClosed;
LIR::ReadOnlyRange callAddrRange = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed);
assert(isClosed);
BlockRange().Remove(std::move(callAddrRange));
}
// The callTarget tree needs to be sequenced.
LIR::Range callTargetRange = LIR::SeqTree(comp, callTarget);
// Verify the special args are what we expect, and replace the dummy args with real values.
// We need to figure out the size of the outgoing stack arguments, not including the special args.
// The number of 4-byte words is passed to the helper for the incoming and outgoing argument sizes.
// This number is exactly the next slot number in the call's argument info struct.
unsigned nNewStkArgsBytes = call->fgArgInfo->GetNextSlotByteOffset();
const int wordSize = 4;
unsigned nNewStkArgsWords = nNewStkArgsBytes / wordSize;
DEBUG_ARG_SLOTS_ASSERT(call->fgArgInfo->GetNextSlotNum() == nNewStkArgsWords);
assert(nNewStkArgsWords >= 4); // There must be at least the four special stack args.
nNewStkArgsWords -= 4;
unsigned numArgs = call->fgArgInfo->ArgCount();
fgArgTabEntry* argEntry;
// arg 0 == callTarget.
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 1);
assert(argEntry != nullptr);
GenTree* arg0 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
ContainCheckRange(callTargetRange);
BlockRange().InsertAfter(arg0, std::move(callTargetRange));
bool isClosed;
LIR::ReadOnlyRange secondArgRange = BlockRange().GetTreeRange(arg0, &isClosed);
assert(isClosed);
BlockRange().Remove(std::move(secondArgRange));
argEntry->GetNode()->AsPutArgStk()->gtOp1 = callTarget;
// arg 1 == flags
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 2);
assert(argEntry != nullptr);
GenTree* arg1 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
assert(arg1->gtOper == GT_CNS_INT);
ssize_t tailCallHelperFlags = 1 | // always restore EDI,ESI,EBX
(call->IsVirtualStub() ? 0x2 : 0x0); // Stub dispatch flag
arg1->AsIntCon()->gtIconVal = tailCallHelperFlags;
// arg 2 == numberOfNewStackArgsWords
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 3);
assert(argEntry != nullptr);
GenTree* arg2 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
assert(arg2->gtOper == GT_CNS_INT);
arg2->AsIntCon()->gtIconVal = nNewStkArgsWords;
#ifdef DEBUG
// arg 3 == numberOfOldStackArgsWords
argEntry = comp->gtArgEntryByArgNum(call, numArgs - 4);
assert(argEntry != nullptr);
GenTree* arg3 = argEntry->GetNode()->AsPutArgStk()->gtGetOp1();
assert(arg3->gtOper == GT_CNS_INT);
#endif // DEBUG
// Transform this call node into a call to Jit tail call helper.
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_TAILCALL);
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
// Lower this as if it were a pure helper call.
call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
GenTree* result = LowerDirectCall(call);
// Now add back tail call flags for identifying this node as tail call dispatched via helper.
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL | GTF_CALL_M_TAILCALL_VIA_JIT_HELPER;
#ifdef PROFILING_SUPPORTED
// Insert profiler tail call hook if needed.
// Since we don't know the insertion point, pass null for second param.
if (comp->compIsProfilerHookNeeded())
{
InsertProfTailCallHook(call, nullptr);
}
#endif // PROFILING_SUPPORTED
return result;
}
//------------------------------------------------------------------------
// LowerCFGCall: Potentially lower a call to use control-flow guard. This
// expands indirect calls into either a validate+call sequence or to a dispatch
// helper taking the original target in a special register.
//
// Arguments:
// call - The call node
//
void Lowering::LowerCFGCall(GenTreeCall* call)
{
assert(!call->IsHelperCall(comp, CORINFO_HELP_DISPATCH_INDIRECT_CALL));
if (call->IsHelperCall(comp, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
return;
}
GenTree* callTarget = call->gtCallType == CT_INDIRECT ? call->gtCallAddr : call->gtControlExpr;
if ((callTarget == nullptr) || callTarget->IsIntegralConst())
{
// This is a direct call, no CFG check is necessary.
return;
}
CFGCallKind cfgKind = call->GetCFGCallKind();
switch (cfgKind)
{
case CFGCallKind::ValidateAndCall:
{
// To safely apply CFG we need to generate a very specific pattern:
// in particular, it is a safety issue to allow the JIT to reload
// the call target from memory between calling
// CORINFO_HELP_VALIDATE_INDIRECT_CALL and the target. This is
// something that would easily occur in debug codegen if we
// produced high-level IR. Instead we will use a GT_PHYSREG node
// to get the target back from the register that contains the target.
//
// Additionally, the validator does not preserve all arg registers,
// so we have to move all GT_PUTARG_REG nodes that would otherwise
// be trashed ahead. The JIT also has an internal invariant that
// once GT_PUTARG nodes start to appear in LIR, the call is coming
// up. To avoid breaking this invariant we move _all_ GT_PUTARG
// nodes (in particular, GC info reporting relies on this).
//
// To sum up, we end up transforming
//
// ta... = <early args>
// tb... = <late args>
// tc = callTarget
// GT_CALL tc, ta..., tb...
//
// into
//
// ta... = <early args> (without GT_PUTARG_* nodes)
// tb = callTarget
// GT_CALL CORINFO_HELP_VALIDATE_INDIRECT_CALL, tb
// tc = GT_PHYSREG REG_VALIDATE_INDIRECT_CALL_ADDR (preserved by helper)
// td = <moved GT_PUTARG_* nodes>
// GT_CALL tc, ta..., td..
//
GenTree* regNode = PhysReg(REG_VALIDATE_INDIRECT_CALL_ADDR, TYP_I_IMPL);
LIR::Use useOfTar;
bool gotUse = BlockRange().TryGetUse(callTarget, &useOfTar);
assert(gotUse);
useOfTar.ReplaceWith(regNode);
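// The call now consumes the target from the physical register preserved by the
// validation helper, so the target cannot be reloaded from memory in between.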
GenTree* targetPlaceholder = comp->gtNewZeroConNode(callTarget->TypeGet());
// Add the call to the validator. Use a placeholder for the target while we
// morph, sequence and lower, to avoid redoing that for the actual target.
GenTreeCall::Use* args = comp->gtNewCallArgs(targetPlaceholder);
GenTreeCall* validate = comp->gtNewHelperCallNode(CORINFO_HELP_VALIDATE_INDIRECT_CALL, TYP_VOID, args);
comp->fgMorphTree(validate);
LIR::Range validateRange = LIR::SeqTree(comp, validate);
GenTree* validateFirst = validateRange.FirstNode();
GenTree* validateLast = validateRange.LastNode();
// Insert the validator with the call target before the late args.
BlockRange().InsertBefore(call, std::move(validateRange));
// Swap out the target
gotUse = BlockRange().TryGetUse(targetPlaceholder, &useOfTar);
assert(gotUse);
useOfTar.ReplaceWith(callTarget);
targetPlaceholder->SetUnusedValue();
LowerRange(validateFirst, validateLast);
// Insert the PHYSREG node that we must load right after validation.
BlockRange().InsertAfter(validate, regNode);
LowerNode(regNode);
// Finally move all GT_PUTARG_* nodes
for (GenTreeCall::Use& use : call->Args())
{
GenTree* node = use.GetNode();
if (!node->IsValue())
{
// Non-value nodes in early args are setup nodes for late args.
continue;
}
assert(node->OperIsPutArg() || node->OperIsFieldList());
MoveCFGCallArg(call, node);
}
for (GenTreeCall::Use& use : call->LateArgs())
{
GenTree* node = use.GetNode();
assert(node->OperIsPutArg() || node->OperIsFieldList());
MoveCFGCallArg(call, node);
}
break;
}
case CFGCallKind::Dispatch:
{
#ifdef REG_DISPATCH_INDIRECT_CALL_ADDR
// Now insert the call target as an extra argument.
//
// First append the early placeholder arg
GenTreeCall::Use** earlySlot = &call->gtCallArgs;
unsigned int index = call->gtCallThisArg != nullptr ? 1 : 0;
while (*earlySlot != nullptr)
{
earlySlot = &(*earlySlot)->NextRef();
index++;
}
assert(index == call->fgArgInfo->ArgCount());
GenTree* placeHolder = comp->gtNewArgPlaceHolderNode(callTarget->TypeGet(), NO_CLASS_HANDLE);
placeHolder->gtFlags |= GTF_LATE_ARG;
*earlySlot = comp->gtNewCallArgs(placeHolder);
// Append the late actual arg
GenTreeCall::Use** lateSlot = &call->gtCallLateArgs;
unsigned int lateIndex = 0;
while (*lateSlot != nullptr)
{
lateSlot = &(*lateSlot)->NextRef();
lateIndex++;
}
*lateSlot = comp->gtNewCallArgs(callTarget);
// Add an entry into the arg info
regNumber regNum = REG_DISPATCH_INDIRECT_CALL_ADDR;
unsigned numRegs = 1;
unsigned byteSize = TARGET_POINTER_SIZE;
unsigned byteAlignment = TARGET_POINTER_SIZE;
bool isStruct = false;
bool isFloatHfa = false;
bool isVararg = false;
fgArgTabEntry* entry =
call->fgArgInfo->AddRegArg(index, placeHolder, *earlySlot, regNum, numRegs, byteSize, byteAlignment,
isStruct, isFloatHfa,
isVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0)
UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr));
entry->lateUse = *lateSlot;
entry->SetLateArgInx(lateIndex);
// Lower the newly added args now that call is updated
LowerArg(call, &(*earlySlot)->NodeRef());
LowerArg(call, &(*lateSlot)->NodeRef());
// Finally update the call to be a helper call
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = comp->eeFindHelper(CORINFO_HELP_DISPATCH_INDIRECT_CALL);
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
// Now relower the call target
call->gtControlExpr = LowerDirectCall(call);
if (call->gtControlExpr != nullptr)
{
LIR::Range dispatchControlExprRange = LIR::SeqTree(comp, call->gtControlExpr);
ContainCheckRange(dispatchControlExprRange);
BlockRange().InsertBefore(call, std::move(dispatchControlExprRange));
}
#else
assert(!"Unexpected CFGCallKind::Dispatch for platform without dispatcher");
#endif
break;
}
default:
unreached();
}
}
//------------------------------------------------------------------------
// IsInvariantInRange: Check if a node is invariant in the specified range. In
// other words, can 'node' be moved to right before 'endExclusive' without its
// computation changing values?
//
// Arguments:
// node - The node.
// endExclusive - The exclusive end of the range to check invariance for.
//
bool Lowering::IsInvariantInRange(GenTree* node, GenTree* endExclusive)
{
assert(node->Precedes(endExclusive));
if (node->IsInvariant())
{
return true;
}
if (!node->IsValue())
{
return false;
}
if (node->OperIsLocal())
{
GenTreeLclVarCommon* lcl = node->AsLclVarCommon();
LclVarDsc* desc = comp->lvaGetDesc(lcl);
if (desc->IsAddressExposed())
{
return false;
}
// Currently, non-address exposed locals have the property that their
// use occurs at the user, so no further interference check is
// necessary.
return true;
}
return false;
}
//------------------------------------------------------------------------
// MoveCFGCallArg: Given a call that will be CFG transformed using the
// validate+call scheme, and an argument GT_PUTARG_* or GT_FIELD_LIST node,
// move that node right before the call.
//
// Arguments:
// call - The call that is being CFG transformed
// node - The argument node
//
// Remarks:
// We can always move the GT_PUTARG_* node further ahead as the side-effects
// of these nodes are handled by LSRA. However, the operands of these nodes
// are not always safe to move further ahead; for invariant operands, we
// move them ahead as well to shorten the lifetime of these values.
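// For example (illustrative sketch, not a specific dump): given
//   t1 = LCL_VAR V03; t2 = PUTARG_REG t1; ...; CALL(t2)
// the PUTARG_REG is moved to just before the call (after the validator), and the
// invariant LCL_VAR operand is moved along with it.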
//
void Lowering::MoveCFGCallArg(GenTreeCall* call, GenTree* node)
{
assert(node->OperIsPutArg() || node->OperIsFieldList());
if (node->OperIsFieldList())
{
JITDUMP("Node is a GT_FIELD_LIST; moving all operands\n");
for (GenTreeFieldList::Use& operand : node->AsFieldList()->Uses())
{
assert(operand.GetNode()->OperIsPutArg());
MoveCFGCallArg(call, operand.GetNode());
}
}
else
{
GenTree* operand = node->AsOp()->gtGetOp1();
JITDUMP("Checking if we can move operand of GT_PUTARG_* node:\n");
DISPTREE(operand);
if (((operand->gtFlags & GTF_ALL_EFFECT) == 0) && IsInvariantInRange(operand, call))
{
JITDUMP("...yes, moving to after validator call\n");
BlockRange().Remove(operand);
BlockRange().InsertBefore(call, operand);
}
else
{
JITDUMP("...no, operand has side effects or is not invariant\n");
}
}
JITDUMP("Moving\n");
DISPTREE(node);
JITDUMP("\n");
BlockRange().Remove(node);
BlockRange().InsertBefore(call, node);
}
#ifndef TARGET_64BIT
//------------------------------------------------------------------------
// Lowering::DecomposeLongCompare: Decomposes a TYP_LONG compare node.
//
// Arguments:
// cmp - the compare node
//
// Return Value:
// The next node to lower.
//
// Notes:
// This is done during lowering because DecomposeLongs handles only nodes
// that produce TYP_LONG values. Compare nodes may consume TYP_LONG values
// but produce TYP_INT values.
//
GenTree* Lowering::DecomposeLongCompare(GenTree* cmp)
{
assert(cmp->gtGetOp1()->TypeGet() == TYP_LONG);
GenTree* src1 = cmp->gtGetOp1();
GenTree* src2 = cmp->gtGetOp2();
assert(src1->OperIs(GT_LONG));
assert(src2->OperIs(GT_LONG));
GenTree* loSrc1 = src1->gtGetOp1();
GenTree* hiSrc1 = src1->gtGetOp2();
GenTree* loSrc2 = src2->gtGetOp1();
GenTree* hiSrc2 = src2->gtGetOp2();
BlockRange().Remove(src1);
BlockRange().Remove(src2);
genTreeOps condition = cmp->OperGet();
GenTree* loCmp;
GenTree* hiCmp;
if (cmp->OperIs(GT_EQ, GT_NE))
{
//
// Transform (x EQ|NE y) into (((x.lo XOR y.lo) OR (x.hi XOR y.hi)) EQ|NE 0). If y is 0 then this can
// be reduced to just ((x.lo OR x.hi) EQ|NE 0). The OR is expected to set the condition flags so we
// don't need to generate a redundant compare against 0, we only generate a SETCC|JCC instruction.
//
// XOR is used rather than SUB because it is commutative and thus allows swapping the operands when
// the first happens to be a constant. Usually only the second compare operand is a constant but it's
// still possible to have a constant on the left side. For example, when src1 is a uint->ulong cast
// then hiSrc1 would be 0.
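//
// For example (sketch): a long equality against zero decomposes roughly into
//   t = x.lo OR x.hi
//   SETCC|JCC EQ|NE ; consumes the flags set by the OR
// while a general equality ORs together XOR(x.lo, y.lo) and XOR(x.hi, y.hi) first.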
//
if (loSrc1->OperIs(GT_CNS_INT))
{
std::swap(loSrc1, loSrc2);
}
if (loSrc2->IsIntegralConst(0))
{
BlockRange().Remove(loSrc2);
loCmp = loSrc1;
}
else
{
loCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, loSrc1, loSrc2);
BlockRange().InsertBefore(cmp, loCmp);
ContainCheckBinary(loCmp->AsOp());
}
if (hiSrc1->OperIs(GT_CNS_INT))
{
std::swap(hiSrc1, hiSrc2);
}
if (hiSrc2->IsIntegralConst(0))
{
BlockRange().Remove(hiSrc2);
hiCmp = hiSrc1;
}
else
{
hiCmp = comp->gtNewOperNode(GT_XOR, TYP_INT, hiSrc1, hiSrc2);
BlockRange().InsertBefore(cmp, hiCmp);
ContainCheckBinary(hiCmp->AsOp());
}
hiCmp = comp->gtNewOperNode(GT_OR, TYP_INT, loCmp, hiCmp);
BlockRange().InsertBefore(cmp, hiCmp);
ContainCheckBinary(hiCmp->AsOp());
}
else
{
assert(cmp->OperIs(GT_LT, GT_LE, GT_GE, GT_GT));
//
// If the compare is signed then (x LT|GE y) can be transformed into ((x SUB y) LT|GE 0).
// If the compare is unsigned we can still use SUB but we need to check the Carry flag,
// not the actual result. In both cases we can simply check the appropriate condition flags
// and ignore the actual result:
// SUB_LO loSrc1, loSrc2
// SUB_HI hiSrc1, hiSrc2
// SETCC|JCC (signed|unsigned LT|GE)
// If loSrc2 happens to be 0 then the first SUB can be eliminated and the second one can
// be turned into a CMP because the first SUB would have set carry to 0. This effectively
// transforms a long compare against 0 into an int compare of the high part against 0.
//
// (x LE|GT y) can be transformed into ((x SUB y) LE|GT 0) but checking that a long value
// is greater than 0 is not so easy. We need to turn this into a positive/negative check
// like the one we get for LT|GE compares, this can be achieved by swapping the compare:
// (x LE|GT y) becomes (y GE|LT x)
//
// Having to swap operands is problematic when the second operand is a constant. The constant
// moves to the first operand where it cannot be contained and thus needs a register. This can
// be avoided by changing the constant such that LE|GT becomes LT|GE:
// (x LE|GT 41) becomes (x LT|GE 42)
//
if (cmp->OperIs(GT_LE, GT_GT))
{
bool mustSwap = true;
if (loSrc2->OperIs(GT_CNS_INT) && hiSrc2->OperIs(GT_CNS_INT))
{
uint32_t loValue = static_cast<uint32_t>(loSrc2->AsIntCon()->IconValue());
uint32_t hiValue = static_cast<uint32_t>(hiSrc2->AsIntCon()->IconValue());
uint64_t value = static_cast<uint64_t>(loValue) | (static_cast<uint64_t>(hiValue) << 32);
uint64_t maxValue = cmp->IsUnsigned() ? UINT64_MAX : INT64_MAX;
if (value != maxValue)
{
value++;
loValue = value & UINT32_MAX;
hiValue = (value >> 32) & UINT32_MAX;
loSrc2->AsIntCon()->SetIconValue(loValue);
hiSrc2->AsIntCon()->SetIconValue(hiValue);
condition = cmp->OperIs(GT_LE) ? GT_LT : GT_GE;
mustSwap = false;
}
}
if (mustSwap)
{
std::swap(loSrc1, loSrc2);
std::swap(hiSrc1, hiSrc2);
condition = GenTree::SwapRelop(condition);
}
}
assert((condition == GT_LT) || (condition == GT_GE));
if (loSrc2->IsIntegralConst(0))
{
BlockRange().Remove(loSrc2);
// Very conservative dead code removal... but it helps.
if (loSrc1->OperIs(GT_CNS_INT, GT_LCL_VAR, GT_LCL_FLD))
{
BlockRange().Remove(loSrc1);
}
else
{
loSrc1->SetUnusedValue();
}
hiCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, hiSrc1, hiSrc2);
BlockRange().InsertBefore(cmp, hiCmp);
ContainCheckCompare(hiCmp->AsOp());
}
else
{
loCmp = comp->gtNewOperNode(GT_CMP, TYP_VOID, loSrc1, loSrc2);
hiCmp = comp->gtNewOperNode(GT_SUB_HI, TYP_INT, hiSrc1, hiSrc2);
BlockRange().InsertBefore(cmp, loCmp, hiCmp);
ContainCheckCompare(loCmp->AsOp());
ContainCheckBinary(hiCmp->AsOp());
//
// Try to move the first SUB_HI operands right in front of it, this allows using
// a single temporary register instead of 2 (one for CMP and one for SUB_HI). Do
// this only for locals as they won't change condition flags. Note that we could
// move constants (except 0 which generates XOR reg, reg) but it's extremely rare
// to have a constant as the first operand.
//
if (hiSrc1->OperIs(GT_LCL_VAR, GT_LCL_FLD) && IsInvariantInRange(hiSrc1, hiCmp))
{
BlockRange().Remove(hiSrc1);
BlockRange().InsertBefore(hiCmp, hiSrc1);
}
}
}
hiCmp->gtFlags |= GTF_SET_FLAGS;
if (hiCmp->IsValue())
{
hiCmp->SetUnusedValue();
}
LIR::Use cmpUse;
if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
{
BlockRange().Remove(cmp);
GenTree* jcc = cmpUse.User();
jcc->AsOp()->gtOp1 = nullptr;
jcc->ChangeOper(GT_JCC);
jcc->gtFlags |= GTF_USE_FLAGS;
jcc->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
}
else
{
cmp->AsOp()->gtOp1 = nullptr;
cmp->AsOp()->gtOp2 = nullptr;
cmp->ChangeOper(GT_SETCC);
cmp->gtFlags |= GTF_USE_FLAGS;
cmp->AsCC()->gtCondition = GenCondition::FromIntegralRelop(condition, cmp->IsUnsigned());
}
return cmp->gtNext;
}
#endif // !TARGET_64BIT
//------------------------------------------------------------------------
// Lowering::OptimizeConstCompare: Performs various "compare with const" optimizations.
//
// Arguments:
// cmp - the compare node
//
// Return Value:
// The original compare node if lowering should proceed as usual or the next node
// to lower if the compare node was changed in such a way that lowering is no
// longer needed.
//
// Notes:
// - Narrow operands to enable memory operand containment (XARCH specific).
// - Transform cmp(and(x, y), 0) into test(x, y) (XARCH/Arm64 specific but could
// be used for ARM as well if support for GT_TEST_EQ/GT_TEST_NE is added).
// - Transform TEST(x, LSH(1, y)) into BT(x, y) (XARCH specific)
// - Transform RELOP(OP, 0) into SETCC(OP) or JCC(OP) if OP can set the
// condition flags appropriately (XARCH/ARM64 specific but could be extended
// to ARM32 as well if ARM32 codegen supports GTF_SET_FLAGS).
//
GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
{
assert(cmp->gtGetOp2()->IsIntegralConst());
#if defined(TARGET_XARCH) || defined(TARGET_ARM64)
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
ssize_t op2Value = op2->IconValue();
#ifdef TARGET_XARCH
var_types op1Type = op1->TypeGet();
if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && FitsIn(op1Type, op2Value))
{
//
// If op1's type is small then try to narrow op2 so it has the same type as op1.
// Small types are usually used by memory loads and if both compare operands have
// the same type then the memory load can be contained. In certain situations
// (e.g "cmp ubyte, 200") we also get a smaller instruction encoding.
//
op2->gtType = op1Type;
}
else
#endif
if (op1->OperIs(GT_CAST) && !op1->gtOverflow())
{
GenTreeCast* cast = op1->AsCast();
var_types castToType = cast->CastToType();
GenTree* castOp = cast->gtGetOp1();
if (((castToType == TYP_BOOL) || (castToType == TYP_UBYTE)) && FitsIn<UINT8>(op2Value))
{
//
// Since we're going to remove the cast we need to be able to narrow the cast operand
// to the cast type. This can be done safely only for certain opers (e.g AND, OR, XOR).
// Some opers just can't be narrowed (e.g DIV, MUL) while other could be narrowed but
// doing so would produce incorrect results (e.g. RSZ, RSH).
//
// The below list of handled opers is conservative but enough to handle the most common
// situations. In particular this includes CALL; sometimes the JIT unnecessarily widens
// the result of bool returning calls.
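//
// For example (sketch), on XARCH:
//   EQ(CAST<ubyte>(AND(x, y)), 0) => EQ(AND<ubyte>(x, y), 0<ubyte>)
// while on ARM64 a zero compare becomes a TEST_EQ|TEST_NE against 0xff instead.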
//
bool removeCast =
#ifdef TARGET_ARM64
(op2Value == 0) && cmp->OperIs(GT_EQ, GT_NE, GT_GT) &&
#endif
(castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIs(GT_OR, GT_XOR, GT_AND)
#ifdef TARGET_XARCH
|| IsContainableMemoryOp(castOp)
#endif
);
if (removeCast)
{
assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
#ifdef TARGET_ARM64
bool cmpEq = cmp->OperIs(GT_EQ);
cmp->SetOperRaw(cmpEq ? GT_TEST_EQ : GT_TEST_NE);
op2->SetIconValue(0xff);
op2->gtType = castOp->gtType;
#else
castOp->gtType = castToType;
op2->gtType = castToType;
#endif
// If we have any contained memory ops on castOp, they must now not be contained.
if (castOp->OperIs(GT_OR, GT_XOR, GT_AND))
{
GenTree* op1 = castOp->gtGetOp1();
if ((op1 != nullptr) && !op1->IsCnsIntOrI())
{
op1->ClearContained();
}
GenTree* op2 = castOp->gtGetOp2();
if ((op2 != nullptr) && !op2->IsCnsIntOrI())
{
op2->ClearContained();
}
}
cmp->AsOp()->gtOp1 = castOp;
BlockRange().Remove(cast);
}
}
}
else if (op1->OperIs(GT_AND) && cmp->OperIs(GT_EQ, GT_NE))
{
//
// Transform ((x AND y) EQ|NE 0) into (x TEST_EQ|TEST_NE y) when possible.
//
GenTree* andOp1 = op1->gtGetOp1();
GenTree* andOp2 = op1->gtGetOp2();
if (op2Value != 0)
{
// Optimizes (X & 1) == 1 to (X & 1)
// The compiler requires jumps to have relop operands, so we do not fold that case.
LIR::Use cmpUse;
if ((op2Value == 1) && cmp->OperIs(GT_EQ))
{
if (andOp2->IsIntegralConst(1) && (genActualType(op1) == cmp->TypeGet()) &&
BlockRange().TryGetUse(cmp, &cmpUse) && !cmpUse.User()->OperIs(GT_JTRUE))
{
GenTree* next = cmp->gtNext;
cmpUse.ReplaceWith(op1);
BlockRange().Remove(cmp->gtGetOp2());
BlockRange().Remove(cmp);
return next;
}
}
//
// If we don't have a 0 compare we can get one by transforming ((x AND mask) EQ|NE mask)
// into ((x AND mask) NE|EQ 0) when mask is a single bit.
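//
// For example (sketch): ((x AND 4) EQ 4) becomes ((x AND 4) NE 0), which the code
// below then turns into TEST_NE(x, 4).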
//
if (isPow2<target_size_t>(static_cast<target_size_t>(op2Value)) && andOp2->IsIntegralConst(op2Value))
{
op2Value = 0;
op2->SetIconValue(0);
cmp->SetOperRaw(GenTree::ReverseRelop(cmp->OperGet()));
}
}
if (op2Value == 0)
{
BlockRange().Remove(op1);
BlockRange().Remove(op2);
cmp->SetOperRaw(cmp->OperIs(GT_EQ) ? GT_TEST_EQ : GT_TEST_NE);
cmp->AsOp()->gtOp1 = andOp1;
cmp->AsOp()->gtOp2 = andOp2;
// We will re-evaluate containment below
andOp1->ClearContained();
andOp2->ClearContained();
#ifdef TARGET_XARCH
if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
{
//
// For "test" we only care about the bits that are set in the second operand (mask).
// If the mask fits in a small type then we can narrow both operands to generate a "test"
// instruction with a smaller encoding ("test" does not have a r/m32, imm8 form) and avoid
// a widening load in some cases.
//
// For 16 bit operands we narrow only if the memory operand is already 16 bit. This matches
// the behavior of a previous implementation and avoids adding more cases where we generate
// 16 bit instructions that require a length changing prefix (0x66). These suffer from
// significant decoder stalls on Intel CPUs.
//
// We could also do this for 64 bit masks that fit into 32 bit but it doesn't help.
// In such cases morph narrows down the existing GT_AND by inserting a cast between it and
// the memory operand so we'd need to add more code to recognize and eliminate that cast.
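//
// For example (sketch): TEST(IND<int>(addr), 0x20) can be narrowed to
// TEST(IND<ubyte>(addr), 0x20<ubyte>), allowing a byte-sized test instruction.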
//
size_t mask = static_cast<size_t>(andOp2->AsIntCon()->IconValue());
if (FitsIn<UINT8>(mask))
{
andOp1->gtType = TYP_UBYTE;
andOp2->gtType = TYP_UBYTE;
}
else if (FitsIn<UINT16>(mask) && genTypeSize(andOp1) == 2)
{
andOp1->gtType = TYP_USHORT;
andOp2->gtType = TYP_USHORT;
}
}
#endif
}
}
if (cmp->OperIs(GT_TEST_EQ, GT_TEST_NE))
{
#ifdef TARGET_XARCH
//
// Transform TEST_EQ|NE(x, LSH(1, y)) into BT(x, y) when possible. Using BT
// results in smaller and faster code. It also doesn't have special register
// requirements, unlike LSH that requires the shift count to be in ECX.
// Note that BT has the same behavior as LSH when the bit index exceeds the
// operand bit size - it uses (bit_index MOD bit_size).
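//
// For example (sketch): TEST_NE(x, LSH(1, y)) becomes BT(x, y) followed by a
// JCC|SETCC on the carry flag (GenCondition::C).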
//
GenTree* lsh = cmp->gtGetOp2();
LIR::Use cmpUse;
if (lsh->OperIs(GT_LSH) && varTypeIsIntOrI(lsh->TypeGet()) && lsh->gtGetOp1()->IsIntegralConst(1) &&
BlockRange().TryGetUse(cmp, &cmpUse))
{
GenCondition condition = cmp->OperIs(GT_TEST_NE) ? GenCondition::C : GenCondition::NC;
cmp->SetOper(GT_BT);
cmp->gtType = TYP_VOID;
cmp->gtFlags |= GTF_SET_FLAGS;
cmp->AsOp()->gtOp2 = lsh->gtGetOp2();
cmp->gtGetOp2()->ClearContained();
BlockRange().Remove(lsh->gtGetOp1());
BlockRange().Remove(lsh);
GenTreeCC* cc;
if (cmpUse.User()->OperIs(GT_JTRUE))
{
cmpUse.User()->ChangeOper(GT_JCC);
cc = cmpUse.User()->AsCC();
cc->gtCondition = condition;
}
else
{
cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
BlockRange().InsertAfter(cmp, cc);
cmpUse.ReplaceWith(cc);
}
cc->gtFlags |= GTF_USE_FLAGS;
return cmp->gtNext;
}
#endif // TARGET_XARCH
}
else if (cmp->OperIs(GT_EQ, GT_NE))
{
GenTree* op1 = cmp->gtGetOp1();
GenTree* op2 = cmp->gtGetOp2();
// TODO-CQ: right now the below peep is inexpensive and gets the benefit in most
// cases because in majority of cases op1, op2 and cmp would be in that order in
// execution. In general we should be able to check that all the nodes that come
// after op1 do not modify the flags so that it is safe to avoid generating a
// test instruction.
if (op2->IsIntegralConst(0) && (op1->gtNext == op2) && (op2->gtNext == cmp) &&
#ifdef TARGET_XARCH
(op1->OperIs(GT_AND, GT_OR, GT_XOR, GT_ADD, GT_SUB, GT_NEG)
#ifdef FEATURE_HW_INTRINSICS
|| (op1->OperIs(GT_HWINTRINSIC) &&
emitter::DoesWriteZeroFlag(HWIntrinsicInfo::lookupIns(op1->AsHWIntrinsic())))
#endif // FEATURE_HW_INTRINSICS
)
#else // TARGET_ARM64
op1->OperIs(GT_AND, GT_ADD, GT_SUB)
#endif
)
{
op1->gtFlags |= GTF_SET_FLAGS;
op1->SetUnusedValue();
BlockRange().Remove(op2);
GenTree* next = cmp->gtNext;
GenTree* cc;
genTreeOps ccOp;
LIR::Use cmpUse;
// Fast check for the common case - relop used by a JTRUE that immediately follows it.
if ((next != nullptr) && next->OperIs(GT_JTRUE) && (next->gtGetOp1() == cmp))
{
cc = next;
ccOp = GT_JCC;
next = nullptr;
BlockRange().Remove(cmp);
}
else if (BlockRange().TryGetUse(cmp, &cmpUse) && cmpUse.User()->OperIs(GT_JTRUE))
{
cc = cmpUse.User();
ccOp = GT_JCC;
next = nullptr;
BlockRange().Remove(cmp);
}
else // The relop is not used by a JTRUE or it is not used at all.
{
// Transform the relop node into a SETCC. If it's not used we could remove
// it completely but that means doing more work to handle a rare case.
cc = cmp;
ccOp = GT_SETCC;
}
GenCondition condition = GenCondition::FromIntegralRelop(cmp);
cc->ChangeOper(ccOp);
cc->AsCC()->gtCondition = condition;
cc->gtFlags |= GTF_USE_FLAGS;
return next;
}
}
#endif // defined(TARGET_XARCH) || defined(TARGET_ARM64)
return cmp;
}
//------------------------------------------------------------------------
// Lowering::LowerCompare: Lowers a compare node.
//
// Arguments:
// cmp - the compare node
//
// Return Value:
// The next node to lower.
//
GenTree* Lowering::LowerCompare(GenTree* cmp)
{
#ifndef TARGET_64BIT
if (cmp->gtGetOp1()->TypeGet() == TYP_LONG)
{
return DecomposeLongCompare(cmp);
}
#endif
if (cmp->gtGetOp2()->IsIntegralConst() && !comp->opts.MinOpts())
{
GenTree* next = OptimizeConstCompare(cmp);
// If OptimizeConstCompare returns the compare node as "next" then we need to continue lowering.
if (next != cmp)
{
return next;
}
}
#ifdef TARGET_XARCH
if (cmp->gtGetOp1()->TypeGet() == cmp->gtGetOp2()->TypeGet())
{
if (varTypeIsSmall(cmp->gtGetOp1()->TypeGet()) && varTypeIsUnsigned(cmp->gtGetOp1()->TypeGet()))
{
//
// If both operands have the same type then codegen will use the common operand type to
// determine the instruction type. For small types this would result in performing a
// signed comparison of two small unsigned values without zero extending them to TYP_INT
// which is incorrect. Note that making the comparison unsigned doesn't imply that codegen
// has to generate a small comparison, it can still correctly generate a TYP_INT comparison.
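//
// For example (sketch): with TYP_UBYTE operands 200 and 100, a signed byte compare
// would see -56 < 100 and give the wrong answer, so the relop is marked GTF_UNSIGNED.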
//
cmp->gtFlags |= GTF_UNSIGNED;
}
}
#endif // TARGET_XARCH
ContainCheckCompare(cmp->AsOp());
return cmp->gtNext;
}
//------------------------------------------------------------------------
// Lowering::LowerJTrue: Lowers a JTRUE node.
//
// Arguments:
// jtrue - the JTRUE node
//
// Return Value:
// The next node to lower (usually nullptr).
//
// Notes:
// On ARM64 this may remove the JTRUE node and transform its associated
// relop into a JCMP node.
//
GenTree* Lowering::LowerJTrue(GenTreeOp* jtrue)
{
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
GenTree* relop = jtrue->gtGetOp1();
GenTree* relopOp2 = relop->AsOp()->gtGetOp2();
if ((relop->gtNext == jtrue) && relopOp2->IsCnsIntOrI())
{
bool useJCMP = false;
GenTreeFlags flags = GTF_EMPTY;
#if defined(TARGET_LOONGARCH64)
if (relop->OperIs(GT_EQ, GT_NE))
{
// Codegen will use beq or bne.
flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY;
useJCMP = true;
}
#else // TARGET_ARM64
if (relop->OperIs(GT_EQ, GT_NE) && relopOp2->IsIntegralConst(0))
{
// Codegen will use cbz or cbnz, which do not affect the flag register
flags = relop->OperIs(GT_EQ) ? GTF_JCMP_EQ : GTF_EMPTY;
useJCMP = true;
}
else if (relop->OperIs(GT_TEST_EQ, GT_TEST_NE) && isPow2(relopOp2->AsIntCon()->IconValue()))
{
// Codegen will use tbz or tbnz, which do not affect the flag register
flags = GTF_JCMP_TST | (relop->OperIs(GT_TEST_EQ) ? GTF_JCMP_EQ : GTF_EMPTY);
useJCMP = true;
}
#endif // TARGET_ARM64
if (useJCMP)
{
relop->SetOper(GT_JCMP);
relop->gtFlags &= ~(GTF_JCMP_TST | GTF_JCMP_EQ);
relop->gtFlags |= flags;
relop->gtType = TYP_VOID;
relopOp2->SetContained();
BlockRange().Remove(jtrue);
assert(relop->gtNext == nullptr);
return nullptr;
}
}
#endif // TARGET_ARM64 || TARGET_LOONGARCH64
ContainCheckJTrue(jtrue);
assert(jtrue->gtNext == nullptr);
return nullptr;
}
//----------------------------------------------------------------------------------------------
// LowerNodeCC: Lowers a node that produces a boolean value by setting the condition flags.
//
// Arguments:
// node - The node to lower
// condition - The condition code of the generated SETCC/JCC node
//
// Return Value:
// A SETCC/JCC node or nullptr if `node` is not used.
//
// Notes:
// This simply replaces `node`'s use with an appropriate SETCC/JCC node,
// `node` is not actually changed, except by having its GTF_SET_FLAGS set.
// It's the caller's responsibility to change `node` such that it only
// sets the condition flags, without producing a boolean value.
//
GenTreeCC* Lowering::LowerNodeCC(GenTree* node, GenCondition condition)
{
// Skip over a chain of EQ/NE(x, 0) relops. This may be present either
// because `node` is not a relop and so it cannot be used directly by a
// JTRUE, or because the frontend failed to remove an EQ/NE(x, 0) that's
// used as logical negation.
//
// Usually there's only one such relop but there's little difference
// between removing one or all so we may as well remove them all.
//
// We can't allow any other nodes between `node` and its user because we
// have no way of knowing if those nodes change flags or not. So we're looking
// to skip over a sequence of appropriately connected zero and EQ/NE nodes.
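//
// For example (sketch): for the sequence "node ; 0 ; NE(node, 0) ; JTRUE" the NE and
// its zero operand are removed and the JTRUE becomes a JCC on 'condition'; an EQ in
// the chain reverses 'condition' instead.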
// The x in EQ/NE(x, 0)
GenTree* relop = node;
// The first node of the relop sequence
GenTree* first = node->gtNext;
// The node following the relop sequence
GenTree* next = first;
while ((next != nullptr) && next->IsIntegralConst(0) && (next->gtNext != nullptr) &&
next->gtNext->OperIs(GT_EQ, GT_NE) && (next->gtNext->AsOp()->gtGetOp1() == relop) &&
(next->gtNext->AsOp()->gtGetOp2() == next))
{
relop = next->gtNext;
next = relop->gtNext;
if (relop->OperIs(GT_EQ))
{
condition = GenCondition::Reverse(condition);
}
}
GenTreeCC* cc = nullptr;
// Next may be null if `node` is not used. In that case we don't need to generate a SETCC node.
if (next != nullptr)
{
if (next->OperIs(GT_JTRUE))
{
// If the instruction immediately following 'relop', i.e. 'next' is a conditional branch,
// it should always have 'relop' as its 'op1'. If it doesn't, then we have improperly
// constructed IL (the setting of a condition code should always immediately precede its
// use, since the JIT doesn't track dataflow for condition codes). Still, if it happens
// it's not our problem, it simply means that `node` is not used and can be removed.
if (next->AsUnOp()->gtGetOp1() == relop)
{
assert(relop->OperIsCompare());
next->ChangeOper(GT_JCC);
cc = next->AsCC();
cc->gtCondition = condition;
}
}
else
{
// If the node is used by something other than a JTRUE then we need to insert a
// SETCC node to materialize the boolean value.
LIR::Use use;
if (BlockRange().TryGetUse(relop, &use))
{
cc = new (comp, GT_SETCC) GenTreeCC(GT_SETCC, condition, TYP_INT);
BlockRange().InsertAfter(node, cc);
use.ReplaceWith(cc);
}
}
}
if (cc != nullptr)
{
node->gtFlags |= GTF_SET_FLAGS;
cc->gtFlags |= GTF_USE_FLAGS;
}
// Remove the chain of EQ/NE(x, 0) relop nodes, if any. Note that if a SETCC was
// inserted after `node`, `first` still points to the node that was initially
// after `node`.
if (relop != node)
{
BlockRange().Remove(first, relop);
}
return cc;
}
// Lower "jmp <method>" tail call to insert PInvoke method epilog if required.
void Lowering::LowerJmpMethod(GenTree* jmp)
{
assert(jmp->OperGet() == GT_JMP);
JITDUMP("lowering GT_JMP\n");
DISPNODE(jmp);
JITDUMP("============");
// If PInvokes are in-lined, we have to remember to execute PInvoke method epilog anywhere that
// a method returns.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(jmp));
}
}
// Lower GT_RETURN node to insert PInvoke method epilog if required.
void Lowering::LowerRet(GenTreeUnOp* ret)
{
assert(ret->OperGet() == GT_RETURN);
JITDUMP("lowering GT_RETURN\n");
DISPNODE(ret);
JITDUMP("============");
GenTree* retVal = ret->gtGetOp1();
// There are two kinds of retyping:
// - A simple bitcast can be inserted when we're returning a floating type as an
//   integral type or vice-versa;
// - If we're returning a struct as a primitive type, we change the type of
//   'retval' in 'LowerRetSingleRegStructLclVar()'.
bool needBitcast =
(ret->TypeGet() != TYP_VOID) && (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1()));
bool doPrimitiveBitcast = false;
if (needBitcast)
{
doPrimitiveBitcast = (!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
}
if (doPrimitiveBitcast)
{
// Add a simple bitcast when both types are not structs.
// If one type is a struct it will be handled below.
#if defined(DEBUG)
assert(!varTypeIsStruct(ret) && !varTypeIsStruct(retVal));
#endif
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
else if (ret->TypeGet() != TYP_VOID)
{
#if FEATURE_MULTIREG_RET
if (retVal->OperIs(GT_LCL_VAR) && varTypeIsStruct(retVal))
{
ReturnTypeDesc retTypeDesc;
LclVarDsc* varDsc = nullptr;
varDsc = comp->lvaGetDesc(retVal->AsLclVar());
retTypeDesc.InitializeStructReturnType(comp, varDsc->GetStructHnd(), comp->info.compCallConv);
if (retTypeDesc.GetReturnRegCount() > 1)
{
CheckMultiRegLclVar(retVal->AsLclVar(), &retTypeDesc);
}
}
#endif // FEATURE_MULTIREG_RET
#ifdef DEBUG
if (varTypeIsStruct(ret->TypeGet()) != varTypeIsStruct(retVal->TypeGet()))
{
if (varTypeIsStruct(ret->TypeGet()))
{
assert(comp->info.compRetNativeType != TYP_STRUCT);
var_types retActualType = genActualType(comp->info.compRetNativeType);
var_types retValActualType = genActualType(retVal->TypeGet());
bool constStructInit = retVal->IsConstInitVal();
bool implicitCastFromSameOrBiggerSize = (genTypeSize(retActualType) <= genTypeSize(retValActualType));
// This could happen if we have retyped op1 as a primitive type during struct promotion,
// check `retypedFieldsMap` for details.
bool actualTypesMatch = (retActualType == retValActualType);
assert(actualTypesMatch || constStructInit || implicitCastFromSameOrBiggerSize);
}
}
#endif // DEBUG
if (varTypeIsStruct(ret))
{
LowerRetStruct(ret);
}
else if (!ret->TypeIs(TYP_VOID) && varTypeIsStruct(retVal))
{
// Return struct as a primitive using Unsafe cast.
assert(retVal->OperIs(GT_LCL_VAR));
LowerRetSingleRegStructLclVar(ret);
}
}
// A method doing PInvokes has exactly one return block unless it has tail calls.
if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB))
{
InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
}
ContainCheckRet(ret);
}
//----------------------------------------------------------------------------------------------
// LowerStoreLocCommon: platform independent part of local var or field store lowering.
//
// Arguments:
// lclStore - The store lcl node to lower.
//
void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
{
assert(lclStore->OperIs(GT_STORE_LCL_FLD, GT_STORE_LCL_VAR));
JITDUMP("lowering store lcl var/field (before):\n");
DISPTREERANGE(BlockRange(), lclStore);
JITDUMP("\n");
GenTree* src = lclStore->gtGetOp1();
LclVarDsc* varDsc = comp->lvaGetDesc(lclStore);
const bool srcIsMultiReg = src->IsMultiRegNode();
const bool dstIsMultiReg = lclStore->IsMultiRegLclVar();
if (!dstIsMultiReg && varTypeIsStruct(varDsc))
{
// TODO-Cleanup: we want to check `varDsc->lvRegStruct` as the last condition instead of `!varDsc->lvPromoted`,
// but we do not set it for `CSE` vars so it is currently failing.
assert(varDsc->CanBeReplacedWithItsField(comp) || varDsc->lvDoNotEnregister || !varDsc->lvPromoted);
if (varDsc->CanBeReplacedWithItsField(comp))
{
assert(varDsc->lvFieldCnt == 1);
unsigned fldNum = varDsc->lvFieldLclStart;
LclVarDsc* fldDsc = comp->lvaGetDesc(fldNum);
JITDUMP("Replacing an independently promoted local var V%02u with its only field V%02u for the store "
"from a call [%06u]\n",
lclStore->GetLclNum(), fldNum, comp->dspTreeID(lclStore));
lclStore->SetLclNum(fldNum);
lclStore->ChangeType(fldDsc->TypeGet());
varDsc = fldDsc;
}
}
if (srcIsMultiReg || dstIsMultiReg)
{
const ReturnTypeDesc* retTypeDesc = nullptr;
if (src->OperIs(GT_CALL))
{
retTypeDesc = src->AsCall()->GetReturnTypeDesc();
}
CheckMultiRegLclVar(lclStore->AsLclVar(), retTypeDesc);
}
const var_types lclRegType = varDsc->GetRegisterType(lclStore);
if ((lclStore->TypeGet() == TYP_STRUCT) && !srcIsMultiReg)
{
bool convertToStoreObj;
if (src->OperGet() == GT_CALL)
{
GenTreeCall* call = src->AsCall();
const ClassLayout* layout = varDsc->GetLayout();
#ifdef DEBUG
const unsigned slotCount = layout->GetSlotCount();
#if defined(TARGET_XARCH) && !defined(UNIX_AMD64_ABI)
// Windows x64 doesn't have multireg returns;
// x86 uses them only for the long return type, not for structs.
assert(slotCount == 1);
assert(lclRegType != TYP_UNDEF);
#else // !TARGET_XARCH || UNIX_AMD64_ABI
if (!varDsc->lvIsHfa())
{
if (slotCount > 1)
{
assert(call->HasMultiRegRetVal());
}
else
{
unsigned size = layout->GetSize();
assert((size <= 8) || (size == 16));
bool isPowerOf2 = (((size - 1) & size) == 0);
bool isTypeDefined = (lclRegType != TYP_UNDEF);
assert(isPowerOf2 == isTypeDefined);
}
}
#endif // !TARGET_XARCH || UNIX_AMD64_ABI
#endif // DEBUG
#if !defined(WINDOWS_AMD64_ABI)
if (!call->HasMultiRegRetVal() && (lclRegType == TYP_UNDEF))
{
// If we have a single return register,
// but we can't retype it as a primitive type, we must spill it.
GenTreeLclVar* spilledCall = SpillStructCallResult(call);
lclStore->gtOp1 = spilledCall;
src = lclStore->gtOp1;
JITDUMP("lowering store lcl var/field has to spill call src.\n");
LowerStoreLocCommon(lclStore);
return;
}
#endif // !WINDOWS_AMD64_ABI
convertToStoreObj = false;
}
else if (!varDsc->IsEnregisterableType())
{
convertToStoreObj = true;
}
else if (src->OperIs(GT_CNS_INT))
{
assert(src->IsIntegralConst(0) && "expected an INIT_VAL for non-zero init.");
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(lclRegType))
{
CorInfoType simdBaseJitType = comp->getBaseJitTypeOfSIMDLocal(lclStore);
if (simdBaseJitType == CORINFO_TYPE_UNDEF)
{
// Lie about the type if we don't know/have it.
simdBaseJitType = CORINFO_TYPE_FLOAT;
}
GenTreeSIMD* simdTree =
comp->gtNewSIMDNode(lclRegType, src, SIMDIntrinsicInit, simdBaseJitType, varDsc->lvExactSize);
BlockRange().InsertAfter(src, simdTree);
LowerSIMD(simdTree);
src = simdTree;
lclStore->gtOp1 = src;
convertToStoreObj = false;
}
else
#endif // FEATURE_SIMD
{
convertToStoreObj = false;
}
}
else if (!src->OperIs(GT_LCL_VAR))
{
convertToStoreObj = true;
}
else
{
assert(src->OperIs(GT_LCL_VAR));
convertToStoreObj = false;
}
if (convertToStoreObj)
{
const unsigned lclNum = lclStore->GetLclNum();
GenTreeLclVar* addr = comp->gtNewLclVarAddrNode(lclNum, TYP_BYREF);
comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOp));
addr->gtFlags |= GTF_VAR_DEF;
assert(!addr->IsPartialLclFld(comp));
addr->gtFlags |= GTF_DONT_CSE;
// Create the assignment node.
lclStore->ChangeOper(GT_STORE_OBJ);
GenTreeBlk* objStore = lclStore->AsObj();
// Only the GTF_LATE_ARG flag (if present) is preserved.
objStore->gtFlags &= GTF_LATE_ARG;
objStore->gtFlags |= GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP;
#ifndef JIT32_GCENCODER
objStore->gtBlkOpGcUnsafe = false;
#endif
objStore->gtBlkOpKind = GenTreeObj::BlkOpKindInvalid;
objStore->SetLayout(varDsc->GetLayout());
objStore->SetAddr(addr);
objStore->SetData(src);
BlockRange().InsertBefore(objStore, addr);
LowerBlockStoreCommon(objStore);
return;
}
}
// src and dst can be in registers, check if we need a bitcast.
if (!src->TypeIs(TYP_STRUCT) && (varTypeUsesFloatReg(lclRegType) != varTypeUsesFloatReg(src)))
{
assert(!srcIsMultiReg && !dstIsMultiReg);
assert(lclStore->OperIsLocalStore());
assert(lclRegType != TYP_UNDEF);
GenTree* bitcast = comp->gtNewBitCastNode(lclRegType, src);
lclStore->gtOp1 = bitcast;
src = lclStore->gtGetOp1();
BlockRange().InsertBefore(lclStore, bitcast);
ContainCheckBitCast(bitcast);
}
LowerStoreLoc(lclStore);
JITDUMP("lowering store lcl var/field (after):\n");
DISPTREERANGE(BlockRange(), lclStore);
JITDUMP("\n");
}
//----------------------------------------------------------------------------------------------
// LowerRetStruct: Lowers a struct return node.
//
// Arguments:
// node - The return node to lower.
//
void Lowering::LowerRetStruct(GenTreeUnOp* ret)
{
#ifdef TARGET_ARM64
if (GlobalJitOptions::compFeatureHfa)
{
if (varTypeIsSIMD(ret))
{
if (comp->info.compRetNativeType == TYP_STRUCT)
{
assert(varTypeIsSIMD(ret->gtGetOp1()));
assert(comp->compMethodReturnsMultiRegRegTypeAlternate());
ret->ChangeType(comp->info.compRetNativeType);
}
else
{
assert(comp->info.compRetNativeType == ret->TypeGet());
GenTree* retVal = ret->gtGetOp1();
if (retVal->TypeGet() != ret->TypeGet())
{
assert(retVal->OperIs(GT_LCL_VAR));
LowerRetSingleRegStructLclVar(ret);
}
return;
}
}
}
#endif // TARGET_ARM64
if (comp->compMethodReturnsMultiRegRegTypeAlternate())
{
return;
}
assert(ret->OperIs(GT_RETURN));
assert(varTypeIsStruct(ret));
GenTree* retVal = ret->gtGetOp1();
// Note: small types are returned as INT.
var_types nativeReturnType = genActualType(comp->info.compRetNativeType);
ret->ChangeType(nativeReturnType);
switch (retVal->OperGet())
{
case GT_CALL:
assert(retVal->TypeIs(nativeReturnType)); // Type should be changed during call processing.
break;
case GT_CNS_INT:
// When we promote a LCL_VAR's single field into the return
// we could have all types of constants here.
if (varTypeUsesFloatReg(nativeReturnType))
{
// Do not expect `initblock` for SIMD* types,
// only 'initobj'.
assert(retVal->AsIntCon()->IconValue() == 0);
retVal->BashToConst(0.0, TYP_FLOAT);
}
break;
case GT_OBJ:
retVal->ChangeOper(GT_IND);
FALLTHROUGH;
case GT_IND:
retVal->ChangeType(nativeReturnType);
LowerIndir(retVal->AsIndir());
break;
case GT_LCL_VAR:
LowerRetSingleRegStructLclVar(ret);
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#ifdef FEATURE_SIMD
case GT_SIMD:
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
#endif // FEATURE_HW_INTRINSICS
{
assert(!retVal->TypeIs(TYP_STRUCT));
if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal))
{
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
}
break;
#endif // FEATURE_SIMD || FEATURE_HW_INTRINSICS
case GT_LCL_FLD:
{
#ifdef DEBUG
LclVarDsc* varDsc = comp->lvaGetDesc(retVal->AsLclFld());
assert(varDsc->lvDoNotEnregister);
#endif
retVal->ChangeType(nativeReturnType);
}
break;
default:
assert(varTypeIsEnregisterable(retVal));
if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(retVal))
{
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), retVal);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
break;
}
}
//----------------------------------------------------------------------------------------------
// LowerRetSingleRegStructLclVar: Lowers a return node with a struct lclVar as a source.
//
// Arguments:
// node - The return node to lower.
//
// Notes:
// - the function is only for LclVars that are returned in one register;
// - if the LclVar is allocated in memory then read it as the return type;
// - if the LclVar can be enregistered, read it as the register type and add a bitcast if necessary;
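// For example (sketch): a 4 byte struct local that lives on the stack is read back as a
// GT_LCL_FLD of the return type (or the small normalized type), while an enregistered
// local is read using its register type plus a bitcast when the register files differ.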
//
void Lowering::LowerRetSingleRegStructLclVar(GenTreeUnOp* ret)
{
assert(!comp->compMethodReturnsMultiRegRegTypeAlternate());
assert(ret->OperIs(GT_RETURN));
GenTreeLclVarCommon* lclVar = ret->gtGetOp1()->AsLclVar();
assert(lclVar->OperIs(GT_LCL_VAR));
unsigned lclNum = lclVar->GetLclNum();
LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
// TODO-1stClassStructs: We can no longer independently promote
// or enregister this struct, since it is referenced as a whole.
comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::BlockOpRet));
}
if (varDsc->lvDoNotEnregister)
{
lclVar->ChangeOper(GT_LCL_FLD);
lclVar->AsLclFld()->SetLclOffs(0);
// We are returning as a primitive type and the lcl is of struct type.
assert(comp->info.compRetNativeType != TYP_STRUCT);
assert((genTypeSize(comp->info.compRetNativeType) == genTypeSize(ret)) ||
(varTypeIsIntegral(ret) && varTypeIsIntegral(comp->info.compRetNativeType) &&
(genTypeSize(comp->info.compRetNativeType) <= genTypeSize(ret))));
// If the actual return type requires normalization, then make sure we
// do so by using the correct small type for the GT_LCL_FLD. It would
// be conservative to check just compRetNativeType for this since small
// structs are normalized to primitive types when they are returned in
// registers, so we would normalize for them as well.
if (varTypeIsSmall(comp->info.compRetType))
{
assert(genTypeSize(comp->info.compRetNativeType) == genTypeSize(comp->info.compRetType));
lclVar->ChangeType(comp->info.compRetType);
}
else
{
// Otherwise we don't mind that we leave the upper bits undefined.
lclVar->ChangeType(ret->TypeGet());
}
}
else
{
const var_types lclVarType = varDsc->GetRegisterType(lclVar);
assert(lclVarType != TYP_UNDEF);
const var_types actualType = genActualType(lclVarType);
lclVar->ChangeType(actualType);
if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(lclVarType))
{
GenTree* bitcast = comp->gtNewBitCastNode(ret->TypeGet(), ret->gtOp1);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
}
}
//----------------------------------------------------------------------------------------------
// LowerCallStruct: Lowers a call node that returns a struct.
//
// Arguments:
// call - The call node to lower.
//
// Notes:
// - this handles only single-register returns;
// - it transforms the call's user for `GT_STOREIND`.
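// For example (sketch): a call returning an 8 byte struct is retyped to TYP_LONG here;
// users such as GT_RETURN or GT_STORE_LCL_VAR are left to handle the retyped call,
// while a SIMD-typed GT_STOREIND user is retyped to match.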
//
void Lowering::LowerCallStruct(GenTreeCall* call)
{
assert(varTypeIsStruct(call));
if (call->HasMultiRegRetVal())
{
return;
}
if (GlobalJitOptions::compFeatureHfa)
{
if (comp->IsHfa(call))
{
#if defined(TARGET_ARM64)
assert(comp->GetHfaCount(call) == 1);
#elif defined(TARGET_ARM)
// ARM returns a double in 2 float registers, but
// `call->HasMultiRegRetVal()` counts double registers.
assert(comp->GetHfaCount(call) <= 2);
#else // !TARGET_ARM64 && !TARGET_ARM
NYI("Unknown architecture");
#endif // !TARGET_ARM64 && !TARGET_ARM
var_types hfaType = comp->GetHfaType(call);
if (call->TypeIs(hfaType))
{
return;
}
}
}
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
var_types returnType = comp->getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN);
var_types origType = call->TypeGet();
call->gtType = genActualType(returnType);
LIR::Use callUse;
if (BlockRange().TryGetUse(call, &callUse))
{
GenTree* user = callUse.User();
switch (user->OperGet())
{
case GT_RETURN:
case GT_STORE_LCL_VAR:
case GT_STORE_BLK:
case GT_STORE_OBJ:
// Leave as is, the user will handle it.
assert(user->TypeIs(origType) || varTypeIsSIMD(user->TypeGet()));
break;
#ifdef FEATURE_SIMD
case GT_STORE_LCL_FLD:
// If the call type was ever updated (in importer) to TYP_SIMD*, it should match the user type.
// If not, the user type should match the struct's returnType.
assert((varTypeIsSIMD(user) && user->TypeIs(origType)) || (returnType == user->TypeGet()));
break;
#endif // FEATURE_SIMD
case GT_STOREIND:
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(user))
{
user->ChangeType(returnType);
break;
}
#endif // FEATURE_SIMD
// importer has a separate mechanism to retype calls to helpers,
// keep it for now.
assert(user->TypeIs(TYP_REF) || (user->TypeIs(TYP_I_IMPL) && comp->IsTargetAbi(CORINFO_CORERT_ABI)));
assert(call->IsHelperCall());
assert(returnType == user->TypeGet());
break;
default:
unreached();
}
}
}
//----------------------------------------------------------------------------------------------
// LowerStoreSingleRegCallStruct: Lowers a store block where the source is a struct typed call.
//
// Arguments:
// store - The store node to lower.
//
// Notes:
// - the function is only for calls that return one register;
// - it spills the call's result if the store cannot be retyped as a primitive type;
//
void Lowering::LowerStoreSingleRegCallStruct(GenTreeBlk* store)
{
assert(store->Data()->IsCall());
GenTreeCall* call = store->Data()->AsCall();
assert(!call->HasMultiRegRetVal());
const ClassLayout* layout = store->GetLayout();
var_types regType = layout->GetRegisterType();
if (regType != TYP_UNDEF)
{
#if defined(TARGET_LOONGARCH64)
if (varTypeIsFloating(call->TypeGet()))
{
regType = call->TypeGet();
}
#endif
store->ChangeType(regType);
store->SetOper(GT_STOREIND);
LowerStoreIndirCommon(store->AsStoreInd());
return;
}
else
{
#if defined(WINDOWS_AMD64_ABI)
// All ABIs except Windows x64 support passing 3 byte structs in registers.
// Other 64 bit ABIs also support passing 5, 6 and 7 byte structs.
unreached();
#else // !WINDOWS_AMD64_ABI
if (store->OperIs(GT_STORE_OBJ))
{
store->SetOper(GT_STORE_BLK);
}
store->gtBlkOpKind = GenTreeObj::BlkOpKindUnroll;
GenTreeLclVar* spilledCall = SpillStructCallResult(call);
store->SetData(spilledCall);
LowerBlockStoreCommon(store);
#endif // WINDOWS_AMD64_ABI
}
}
#if !defined(WINDOWS_AMD64_ABI)
//----------------------------------------------------------------------------------------------
// SpillStructCallResult: Spill call result to memory.
//
// Arguments:
// call - a call with a 3, 5, 6 or 7 byte return size that has to be spilled to memory.
//
// Return Value:
// load of the spilled variable.
//
GenTreeLclVar* Lowering::SpillStructCallResult(GenTreeCall* call) const
{
// TODO-1stClassStructs: we can support this in codegen for `GT_STORE_BLK` without new temps.
const unsigned spillNum = comp->lvaGrabTemp(true DEBUGARG("Return value temp for an odd struct return size"));
comp->lvaSetVarDoNotEnregister(spillNum DEBUGARG(DoNotEnregisterReason::LocalField));
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
comp->lvaSetStruct(spillNum, retClsHnd, false);
GenTreeLclFld* spill = new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, call->gtType, spillNum, 0);
spill->gtOp1 = call;
spill->gtFlags |= GTF_VAR_DEF;
BlockRange().InsertAfter(call, spill);
ContainCheckStoreLoc(spill);
GenTreeLclVar* loadCallResult = comp->gtNewLclvNode(spillNum, TYP_STRUCT)->AsLclVar();
BlockRange().InsertAfter(spill, loadCallResult);
return loadCallResult;
}
#endif // !WINDOWS_AMD64_ABI
GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);
// Non-virtual direct/indirect calls: Work out if the address of the
// call is known at JIT time. If not, it is either an indirect call
// or the address must be accessed via a single/double indirection.
void* addr;
InfoAccessType accessType;
CorInfoHelpFunc helperNum = comp->eeGetHelperNum(call->gtCallMethHnd);
#ifdef FEATURE_READYTORUN
if (call->gtEntryPoint.addr != nullptr)
{
accessType = call->gtEntryPoint.accessType;
addr = call->gtEntryPoint.addr;
}
else
#endif
if (call->gtCallType == CT_HELPER)
{
noway_assert(helperNum != CORINFO_HELP_UNDEF);
// The convention on getHelperFtn seems to be (it's not documented) that it either
// returns an address directly, or returns null and sets pAddr to
// another address, which requires an indirection
void* pAddr;
addr = comp->info.compCompHnd->getHelperFtn(helperNum, (void**)&pAddr);
if (addr != nullptr)
{
assert(pAddr == nullptr);
accessType = IAT_VALUE;
}
else
{
accessType = IAT_PVALUE;
addr = pAddr;
}
}
else
{
noway_assert(helperNum == CORINFO_HELP_UNDEF);
CORINFO_ACCESS_FLAGS aflags = CORINFO_ACCESS_ANY;
if (call->IsSameThis())
{
aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_THIS);
}
if (!call->NeedsNullCheck())
{
aflags = (CORINFO_ACCESS_FLAGS)(aflags | CORINFO_ACCESS_NONNULL);
}
CORINFO_CONST_LOOKUP addrInfo;
comp->info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo, aflags);
accessType = addrInfo.accessType;
addr = addrInfo.addr;
}
GenTree* result = nullptr;
switch (accessType)
{
case IAT_VALUE:
// Non-virtual direct call to known address.
// For JIT helper based tailcall (only used on x86) the target
// address is passed as an arg to the helper so we want a node for
// it.
if (!IsCallTargetInRange(addr) || call->IsTailCallViaJitHelper())
{
result = AddrGen(addr);
}
else
{
// a direct call within range of hardware relative call instruction
// stash the address for codegen
call->gtDirectCallAddress = addr;
}
break;
case IAT_PVALUE:
{
// If we are using an indirection cell for a direct call then apply
// an optimization that loads the call target directly from the
// indirection cell, instead of duplicating the tree.
bool hasIndirectionCell = call->GetIndirectionCellArgKind() != NonStandardArgKind::None;
if (!hasIndirectionCell)
{
// Non-virtual direct calls to addresses accessed by
// a single indirection.
GenTree* cellAddr = AddrGen(addr);
#ifdef DEBUG
cellAddr->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
GenTree* indir = Ind(cellAddr);
result = indir;
}
break;
}
case IAT_PPVALUE:
// Non-virtual direct calls to addresses accessed by
// a double indirection.
//
// Expanding an IAT_PPVALUE here, will lose the opportunity
// to Hoist/CSE the first indirection as it is an invariant load
//
assert(!"IAT_PPVALUE case in LowerDirectCall");
noway_assert(helperNum == CORINFO_HELP_UNDEF);
result = AddrGen(addr);
// Double-indirection. Load the address into a register
// and call indirectly through the register
//
result = Ind(Ind(result));
break;
case IAT_RELPVALUE:
{
// Non-virtual direct calls to addresses accessed by
// a single relative indirection.
GenTree* cellAddr = AddrGen(addr);
GenTree* indir = Ind(cellAddr);
result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, indir, AddrGen(addr));
break;
}
default:
noway_assert(!"Bad accessType");
break;
}
return result;
}
GenTree* Lowering::LowerDelegateInvoke(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
assert((comp->info.compCompHnd->getMethodAttribs(call->gtCallMethHnd) &
(CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL)) == (CORINFO_FLG_DELEGATE_INVOKE | CORINFO_FLG_FINAL));
GenTree* thisArgNode;
if (call->IsTailCallViaJitHelper())
{
const unsigned argNum = 0;
fgArgTabEntry* thisArgTabEntry = comp->gtArgEntryByArgNum(call, argNum);
thisArgNode = thisArgTabEntry->GetNode();
}
else
{
thisArgNode = comp->gtGetThisArg(call);
}
assert(thisArgNode != nullptr);
assert(thisArgNode->gtOper == GT_PUTARG_REG);
GenTree* thisExpr = thisArgNode->AsOp()->gtOp1;
// We're going to use the 'this' expression multiple times, so make a local to copy it.
GenTree* base;
if (thisExpr->OperIs(GT_LCL_VAR))
{
base = comp->gtNewLclvNode(thisExpr->AsLclVar()->GetLclNum(), thisExpr->TypeGet());
}
else if (thisExpr->OperIs(GT_LCL_FLD))
{
base = comp->gtNewLclFldNode(thisExpr->AsLclFld()->GetLclNum(), thisExpr->TypeGet(),
thisExpr->AsLclFld()->GetLclOffs());
}
else
{
unsigned delegateInvokeTmp = comp->lvaGrabTemp(true DEBUGARG("delegate invoke call"));
base = comp->gtNewLclvNode(delegateInvokeTmp, thisExpr->TypeGet());
LIR::Use thisExprUse(BlockRange(), &thisArgNode->AsOp()->gtOp1, thisArgNode);
ReplaceWithLclVar(thisExprUse, delegateInvokeTmp);
thisExpr = thisExprUse.Def(); // it's changed; reload it.
}
// replace original expression feeding into thisPtr with
// [originalThis + offsetOfDelegateInstance]
GenTree* newThisAddr = new (comp, GT_LEA)
GenTreeAddrMode(TYP_BYREF, thisExpr, nullptr, 0, comp->eeGetEEInfo()->offsetOfDelegateInstance);
GenTree* newThis = comp->gtNewOperNode(GT_IND, TYP_REF, newThisAddr);
BlockRange().InsertAfter(thisExpr, newThisAddr, newThis);
thisArgNode->AsOp()->gtOp1 = newThis;
ContainCheckIndir(newThis->AsIndir());
// the control target is
// [originalThis + firstTgtOffs]
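// i.e. roughly IND(LEA(base + offsetOfDelegateFirstTarget)), where 'base' is the copy
// of the original 'this' made above.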
unsigned targetOffs = comp->eeGetEEInfo()->offsetOfDelegateFirstTarget;
GenTree* result = new (comp, GT_LEA) GenTreeAddrMode(TYP_REF, base, nullptr, 0, targetOffs);
GenTree* callTarget = Ind(result);
// don't need to sequence and insert this tree, caller will do it
return callTarget;
}
GenTree* Lowering::LowerIndirectNonvirtCall(GenTreeCall* call)
{
#ifdef TARGET_X86
if (call->gtCallCookie != nullptr)
{
NYI_X86("Morphing indirect non-virtual call with non-standard args");
}
#endif
// Indirect cookie calls get transformed by fgMorphArgs into an indirect call with non-standard args.
// Hence we should never see this type of call in lower.
noway_assert(call->gtCallCookie == nullptr);
return nullptr;
}
//------------------------------------------------------------------------
// CreateReturnTrapSeq: Create a tree to perform a "return trap", used in PInvoke
// epilogs to invoke a GC under a condition. The return trap checks some global
// location (the runtime tells us where that is and how many indirections to make),
// then, based on the result, conditionally calls a GC helper. We use a special node
// for this because at this time (late in the compilation phases), introducing flow
// is tedious/difficult.
//
// This is used for PInvoke inlining.
//
// Return Value:
// Code tree to perform the action.
//
GenTree* Lowering::CreateReturnTrapSeq()
{
// The GT_RETURNTRAP node expands to this:
// if (g_TrapReturningThreads)
// {
// RareDisablePreemptiveGC();
// }
// The only thing to do here is build up the expression that evaluates 'g_TrapReturningThreads'.
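// The resulting tree is roughly GT_RETURNTRAP(IND<int>(addr)), where 'addr' is either the
// direct address of g_TrapReturningThreads or an extra indirection through the handle
// returned by the runtime.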
void* pAddrOfCaptureThreadGlobal = nullptr;
int32_t* addrOfCaptureThreadGlobal =
comp->info.compCompHnd->getAddrOfCaptureThreadGlobal(&pAddrOfCaptureThreadGlobal);
GenTree* testTree;
if (addrOfCaptureThreadGlobal != nullptr)
{
testTree = AddrGen(addrOfCaptureThreadGlobal);
}
else
{
testTree = Ind(AddrGen(pAddrOfCaptureThreadGlobal));
}
return comp->gtNewOperNode(GT_RETURNTRAP, TYP_INT, Ind(testTree, TYP_INT));
}
//------------------------------------------------------------------------
// SetGCState: Create a tree that stores the given constant (0 or 1) into the
// thread's GC state field.
//
// This is used for PInvoke inlining.
//
// Arguments:
// state - constant (0 or 1) to store into the thread's GC state field.
//
// Return Value:
// Code tree to perform the action.
//
GenTree* Lowering::SetGCState(int state)
{
// Thread.offsetOfGcState = 0/1
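// The tree built below is roughly STOREIND<byte>(LEA(Thread + offsetOfGCState), CNS_INT(state)),
// where 'Thread' is the frame list root local.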
assert(state == 0 || state == 1);
const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
GenTree* base = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
GenTree* stateNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_BYTE, state);
GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, base, nullptr, 1, pInfo->offsetOfGCState);
GenTree* storeGcState = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_BYTE, addr, stateNode);
return storeGcState;
}
//------------------------------------------------------------------------
// CreateFrameLinkUpdate: Create a tree that either links or unlinks the
// locally-allocated InlinedCallFrame from the Frame list.
//
// This is used for PInvoke inlining.
//
// Arguments:
// action - whether to link (push) or unlink (pop) the Frame
//
// Return Value:
// Code tree to perform the action.
//
GenTree* Lowering::CreateFrameLinkUpdate(FrameLinkAction action)
{
const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
GenTree* TCB = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, TYP_I_IMPL, comp->info.compLvFrameListRoot);
// Thread->m_pFrame
GenTree* addr = new (comp, GT_LEA) GenTreeAddrMode(TYP_I_IMPL, TCB, nullptr, 1, pInfo->offsetOfThreadFrame);
GenTree* data = nullptr;
if (action == PushFrame)
{
// Thread->m_pFrame = &inlinedCallFrame;
data = new (comp, GT_LCL_FLD_ADDR)
GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
}
else
{
assert(action == PopFrame);
// Thread->m_pFrame = inlinedCallFrame.m_pNext;
data = new (comp, GT_LCL_FLD) GenTreeLclFld(GT_LCL_FLD, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar,
pInfo->inlinedCallFrameInfo.offsetOfFrameLink);
}
GenTree* storeInd = new (comp, GT_STOREIND) GenTreeStoreInd(TYP_I_IMPL, addr, data);
return storeInd;
}
//------------------------------------------------------------------------
// InsertPInvokeMethodProlog: Create the code that runs at the start of
// every method that has PInvoke calls.
//
// Initialize the TCB local and the InlinedCallFrame object. Then link ("push")
// the InlinedCallFrame object on the Frame chain. The layout of InlinedCallFrame
// is defined in vm/frames.h. See also vm/jitinterface.cpp for more information.
// The offsets of these fields is returned by the VM in a call to ICorStaticInfo::getEEInfo().
//
// The (current) layout is as follows:
//
//    64-bit   32-bit                                      CORINFO_EE_INFO
//    offset   offset   field name                         offset                  when set
//  -----------------------------------------------------------------------------------------
//    +00h     +00h     GS cookie                          offsetOfGSCookie
//    +08h     +04h     vptr for class InlinedCallFrame    offsetOfFrameVptr       method prolog
//    +10h     +08h     m_Next                             offsetOfFrameLink       method prolog
//    +18h     +0Ch     m_Datum                            offsetOfCallTarget      call site
//    +20h     n/a      m_StubSecretArg                                            not set by JIT
//    +28h     +10h     m_pCallSiteSP                      offsetOfCallSiteSP      x86: call site, and zeroed in method
//                                                                                 prolog;
//                                                                                 non-x86: method prolog (SP remains
//                                                                                 constant in function, after prolog: no
//                                                                                 localloc and PInvoke in same function)
//    +30h     +14h     m_pCallerReturnAddress             offsetOfReturnAddress   call site
//    +38h     +18h     m_pCalleeSavedFP                   offsetOfCalleeSavedFP   not set by JIT
//             +1Ch     m_pThread
//             +20h     m_pSPAfterProlog                   offsetOfSPAfterProlog   arm only
//             +20/24h  JIT retval spill area (int)                                before call_gc    ???
//             +24/28h  JIT retval spill area (long)                               before call_gc    ???
//             +28/2Ch  Saved value of EBP                                         method prolog     ???
//
// Note that in the VM, InlinedCallFrame is a C++ class whose objects have a 'this' pointer that points
// to the InlinedCallFrame vptr (the 2nd field listed above), and the GS cookie is stored *before*
// the object. When we link the InlinedCallFrame onto the Frame chain, we must point at this location,
// and not at the beginning of the InlinedCallFrame local, which is actually the GS cookie.
//
// Return Value:
// none
//
void Lowering::InsertPInvokeMethodProlog()
{
noway_assert(comp->info.compUnmanagedCallCountWithGCTransition);
noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
if (comp->opts.ShouldUsePInvokeHelpers())
{
return;
}
JITDUMP("======= Inserting PInvoke method prolog\n");
// The first BB must be a scratch BB in order for us to be able to safely insert the P/Invoke prolog.
assert(comp->fgFirstBBisScratch());
LIR::Range& firstBlockRange = LIR::AsRange(comp->fgFirstBB);
const CORINFO_EE_INFO* pInfo = comp->eeGetEEInfo();
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = pInfo->inlinedCallFrameInfo;
// First arg: &compiler->lvaInlinedPInvokeFrameVar + callFrameInfo.offsetOfFrameVptr
#if defined(DEBUG)
const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar);
assert(inlinedPInvokeDsc->IsAddressExposed());
#endif // DEBUG
GenTree* frameAddr = new (comp, GT_LCL_FLD_ADDR)
GenTreeLclFld(GT_LCL_FLD_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfFrameVptr);
// Call runtime helper to fill in our InlinedCallFrame and push it on the Frame list:
// TCB = CORINFO_HELP_INIT_PINVOKE_FRAME(&symFrameStart, secretArg);
// for x86, don't pass the secretArg.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86) || defined(TARGET_ARM)
GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr);
#else
GenTreeCall::Use* argList = comp->gtNewCallArgs(frameAddr, PhysReg(REG_SECRET_STUB_PARAM));
#endif
GenTree* call = comp->gtNewHelperCallNode(CORINFO_HELP_INIT_PINVOKE_FRAME, TYP_I_IMPL, argList);
// some sanity checks on the frame list root vardsc
const unsigned lclNum = comp->info.compLvFrameListRoot;
const LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
noway_assert(!varDsc->lvIsParam);
noway_assert(varDsc->lvType == TYP_I_IMPL);
GenTree* store = new (comp, GT_STORE_LCL_VAR) GenTreeLclVar(GT_STORE_LCL_VAR, TYP_I_IMPL, lclNum);
store->AsOp()->gtOp1 = call;
store->gtFlags |= GTF_VAR_DEF;
GenTree* const insertionPoint = firstBlockRange.FirstNonCatchArgNode();
comp->fgMorphTree(store);
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, store));
DISPTREERANGE(firstBlockRange, store);
#if !defined(TARGET_X86) && !defined(TARGET_ARM)
// For x86, this step is done at the call site (due to stack pointer not being static in the function).
// For arm32, CallSiteSP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
// --------------------------------------------------------
// InlinedCallFrame.m_pCallSiteSP = @RSP;
GenTreeLclFld* storeSP = new (comp, GT_STORE_LCL_FLD)
GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
storeSP->gtOp1 = PhysReg(REG_SPBASE);
storeSP->gtFlags |= GTF_VAR_DEF;
assert(inlinedPInvokeDsc->lvDoNotEnregister);
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeSP));
DISPTREERANGE(firstBlockRange, storeSP);
#endif // !defined(TARGET_X86) && !defined(TARGET_ARM)
#if !defined(TARGET_ARM)
// For arm32, CalleeSavedFP is set up by the call to CORINFO_HELP_INIT_PINVOKE_FRAME.
// --------------------------------------------------------
// InlinedCallFrame.m_pCalleeSavedEBP = @RBP;
GenTreeLclFld* storeFP =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfCalleeSavedFP);
assert(inlinedPInvokeDsc->lvDoNotEnregister);
storeFP->gtOp1 = PhysReg(REG_FPBASE);
storeFP->gtFlags |= GTF_VAR_DEF;
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, storeFP));
DISPTREERANGE(firstBlockRange, storeFP);
#endif // !defined(TARGET_ARM)
// --------------------------------------------------------
// On 32-bit targets, CORINFO_HELP_INIT_PINVOKE_FRAME initializes the PInvoke frame and then pushes it onto
// the current thread's Frame stack. On 64-bit targets, it only initializes the PInvoke frame.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
{
// Push a frame - if we are NOT in an IL stub, this is done right before the call
// The init routine sets InlinedCallFrame's m_pNext, so we just set the thread's top-of-stack
GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
firstBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
ContainCheckStoreIndir(frameUpd->AsStoreInd());
DISPTREERANGE(firstBlockRange, frameUpd);
}
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// InsertPInvokeMethodEpilog: Code that needs to be run when exiting any method
// that has PInvoke inlines. This needs to be inserted any place you can exit the
// function: returns, tailcalls and jmps.
//
// Arguments:
// returnBB - basic block from which a method can return
// lastExpr - GenTree of the last top-level statement of returnBB (debug only arg)
//
// Return Value:
// Code tree to perform the action.
//
void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr))
{
assert(returnBB != nullptr);
assert(comp->info.compUnmanagedCallCountWithGCTransition);
if (comp->opts.ShouldUsePInvokeHelpers())
{
return;
}
JITDUMP("======= Inserting PInvoke method epilog\n");
// Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls.
assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) ||
returnBB->endsWithTailCallOrJmp(comp));
LIR::Range& returnBlockRange = LIR::AsRange(returnBB);
GenTree* insertionPoint = returnBlockRange.LastNode();
assert(insertionPoint == lastExpr);
// Note: PInvoke Method Epilog (PME) needs to be inserted just before GT_RETURN, GT_JMP or GT_CALL node in execution
// order so that it is guaranteed that there will be no further PInvokes after that point in the method.
//
// Example1: GT_RETURN(op1) - say execution order is: Op1, GT_RETURN. After inserting PME, execution order would be
// Op1, PME, GT_RETURN
//
// Example2: GT_CALL(arg side effect computing nodes, Stk Args Setup, Reg Args setup). The execution order would be
// arg side effect computing nodes, Stk Args setup, Reg Args setup, GT_CALL
// After inserting PME execution order would be:
// arg side effect computing nodes, Stk Args setup, Reg Args setup, PME, GT_CALL
//
// Example3: GT_JMP. After inserting PME execution order would be: PME, GT_JMP
// That is after PME, args for GT_JMP call will be setup.
// Pop the frame if necessary. This always happens in the epilog on 32-bit targets. For 64-bit targets, we only do
// this in the epilog for IL stubs; for non-IL stubs the frame is popped after every PInvoke call.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
#endif // TARGET_64BIT
{
GenTree* frameUpd = CreateFrameLinkUpdate(PopFrame);
returnBlockRange.InsertBefore(insertionPoint, LIR::SeqTree(comp, frameUpd));
ContainCheckStoreIndir(frameUpd->AsStoreInd());
}
}
//------------------------------------------------------------------------
// InsertPInvokeCallProlog: Emit the call-site prolog for direct calls to unmanaged code.
// It does all the necessary call-site setup of the InlinedCallFrame.
//
// Arguments:
// call - the call for which we are inserting the PInvoke prolog.
//
// Return Value:
// None.
//
void Lowering::InsertPInvokeCallProlog(GenTreeCall* call)
{
JITDUMP("======= Inserting PInvoke call prolog\n");
GenTree* insertBefore = call;
if (call->gtCallType == CT_INDIRECT)
{
bool isClosed;
insertBefore = BlockRange().GetTreeRange(call->gtCallAddr, &isClosed).FirstNode();
assert(isClosed);
}
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
gtCallTypes callType = (gtCallTypes)call->gtCallType;
noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
if (comp->opts.ShouldUsePInvokeHelpers())
{
// First argument is the address of the frame variable.
GenTree* frameAddr =
new (comp, GT_LCL_VAR_ADDR) GenTreeLclVar(GT_LCL_VAR_ADDR, TYP_BYREF, comp->lvaInlinedPInvokeFrameVar);
#if defined(TARGET_X86) && !defined(UNIX_X86_ABI)
// On x86 targets, PInvoke calls need the size of the stack args in InlinedCallFrame.m_Datum.
// This is because the callee pops stack arguments, and we need to keep track of this during stack
// walking
const unsigned numStkArgBytes = call->fgArgInfo->GetNextSlotByteOffset();
GenTree* stackBytes = comp->gtNewIconNode(numStkArgBytes, TYP_INT);
GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr, stackBytes);
#else
GenTreeCall::Use* args = comp->gtNewCallArgs(frameAddr);
#endif
// Insert call to CORINFO_HELP_JIT_PINVOKE_BEGIN
GenTree* helperCall = comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_BEGIN, TYP_VOID, args);
comp->fgMorphTree(helperCall);
BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, helperCall));
LowerNode(helperCall); // helper call is inserted before current node and should be lowered here.
return;
}
// Emit the following sequence:
//
// InlinedCallFrame.callTarget = methodHandle // stored in m_Datum
// InlinedCallFrame.m_pCallSiteSP = SP // x86 only
// InlinedCallFrame.m_pCallerReturnAddress = return address
// GT_START_PREEMPTGC
// Thread.gcState = 0
// (non-stub) - update top Frame on TCB // 64-bit targets only
// ----------------------------------------------------------------------------------
// Setup InlinedCallFrame.callSiteTarget (which is how the JIT refers to it).
// The actual field is InlinedCallFrame.m_Datum which has many different uses and meanings.
GenTree* src = nullptr;
if (callType == CT_INDIRECT)
{
#if !defined(TARGET_64BIT)
// On 32-bit targets, indirect calls need the size of the stack args in InlinedCallFrame.m_Datum.
const unsigned stackByteOffset = call->fgArgInfo->GetNextSlotByteOffset();
src = comp->gtNewIconNode(stackByteOffset, TYP_INT);
#else
// On 64-bit targets, indirect calls may need the stub parameter value in InlinedCallFrame.m_Datum.
// If the stub parameter value is not needed, m_Datum will be initialized by the VM.
if (comp->info.compPublishStubParam)
{
src = comp->gtNewLclvNode(comp->lvaStubArgumentVar, TYP_I_IMPL);
}
#endif // !defined(TARGET_64BIT)
}
else
{
assert(callType == CT_USER_FUNC);
void* pEmbedMethodHandle = nullptr;
CORINFO_METHOD_HANDLE embedMethodHandle =
comp->info.compCompHnd->embedMethodHandle(call->gtCallMethHnd, &pEmbedMethodHandle);
noway_assert((!embedMethodHandle) != (!pEmbedMethodHandle));
if (embedMethodHandle != nullptr)
{
// InlinedCallFrame.callSiteTarget = methodHandle
src = AddrGen(embedMethodHandle);
}
else
{
// InlinedCallFrame.callSiteTarget = *pEmbedMethodHandle
src = Ind(AddrGen(pEmbedMethodHandle));
}
}
if (src != nullptr)
{
// Store into InlinedCallFrame.m_Datum, the offset of which is given by offsetOfCallTarget.
GenTreeLclFld* store =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfCallTarget);
store->gtOp1 = src;
store->gtFlags |= GTF_VAR_DEF;
InsertTreeBeforeAndContainCheck(insertBefore, store);
}
#ifdef TARGET_X86
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallSiteSP = SP
GenTreeLclFld* storeCallSiteSP = new (comp, GT_STORE_LCL_FLD)
GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar, callFrameInfo.offsetOfCallSiteSP);
storeCallSiteSP->gtOp1 = PhysReg(REG_SPBASE);
storeCallSiteSP->gtFlags |= GTF_VAR_DEF;
InsertTreeBeforeAndContainCheck(insertBefore, storeCallSiteSP);
#endif
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallerReturnAddress = &label (the address of the instruction immediately following the call)
GenTreeLclFld* storeLab =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfReturnAddress);
storeLab->gtOp1 = new (comp, GT_LABEL) GenTree(GT_LABEL, TYP_I_IMPL);
storeLab->gtFlags |= GTF_VAR_DEF;
InsertTreeBeforeAndContainCheck(insertBefore, storeLab);
// Push the PInvoke frame if necessary. On 32-bit targets this only happens in the method prolog if a method
// contains PInvokes; on 64-bit targets this is necessary in non-stubs.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
{
// Set the TCB's frame to be the one we just created.
// Note the init routine for the InlinedCallFrame (CORINFO_HELP_INIT_PINVOKE_FRAME)
// has prepended it to the linked list to maintain the stack of Frames.
//
// Stubs do this once per stub, not once per call.
GenTree* frameUpd = CreateFrameLinkUpdate(PushFrame);
BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, frameUpd));
ContainCheckStoreIndir(frameUpd->AsStoreInd());
}
#endif // TARGET_64BIT
// IMPORTANT **** This instruction must be the last real instruction ****
// It changes the thread's state to Preemptive mode
// ----------------------------------------------------------------------------------
// [tcb + offsetOfGcState] = 0
GenTree* storeGCState = SetGCState(0);
BlockRange().InsertBefore(insertBefore, LIR::SeqTree(comp, storeGCState));
ContainCheckStoreIndir(storeGCState->AsStoreInd());
// Indicate that codegen has switched this thread to preemptive GC.
// This tree node doesn't generate any code, but impacts LSRA and gc reporting.
// This tree node is simple so doesn't require sequencing.
GenTree* preemptiveGCNode = new (comp, GT_START_PREEMPTGC) GenTree(GT_START_PREEMPTGC, TYP_VOID);
BlockRange().InsertBefore(insertBefore, preemptiveGCNode);
}
//------------------------------------------------------------------------
// InsertPInvokeCallEpilog: Insert the code that goes after every inlined pinvoke call.
//
// Arguments:
// call - the call for which we are inserting the PInvoke epilog.
//
// Return Value:
// None.
//
void Lowering::InsertPInvokeCallEpilog(GenTreeCall* call)
{
JITDUMP("======= Inserting PInvoke call epilog\n");
if (comp->opts.ShouldUsePInvokeHelpers())
{
noway_assert(comp->lvaInlinedPInvokeFrameVar != BAD_VAR_NUM);
// First argument is the address of the frame variable.
GenTree* frameAddr = comp->gtNewLclVarAddrNode(comp->lvaInlinedPInvokeFrameVar, TYP_BYREF);
#if defined(DEBUG)
const LclVarDsc* inlinedPInvokeDsc = comp->lvaGetDesc(comp->lvaInlinedPInvokeFrameVar);
assert(inlinedPInvokeDsc->IsAddressExposed());
#endif // DEBUG
// Insert call to CORINFO_HELP_JIT_PINVOKE_END
GenTreeCall* helperCall =
comp->gtNewHelperCallNode(CORINFO_HELP_JIT_PINVOKE_END, TYP_VOID, comp->gtNewCallArgs(frameAddr));
comp->fgMorphTree(helperCall);
BlockRange().InsertAfter(call, LIR::SeqTree(comp, helperCall));
ContainCheckCallOperands(helperCall);
return;
}
// gcstate = 1
GenTree* insertionPoint = call->gtNext;
GenTree* tree = SetGCState(1);
BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
ContainCheckStoreIndir(tree->AsStoreInd());
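// Insert the return trap: check g_TrapReturningThreads and rendezvous with a pending GC if needed.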
tree = CreateReturnTrapSeq();
BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
ContainCheckReturnTrap(tree->AsOp());
// Pop the frame if necessary. On 32-bit targets this only happens in the method epilog; on 64-bit targets this
// happens after every PInvoke call in non-stubs. 32-bit targets instead mark the frame as inactive.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_64BIT
if (!comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB))
{
tree = CreateFrameLinkUpdate(PopFrame);
BlockRange().InsertBefore(insertionPoint, LIR::SeqTree(comp, tree));
ContainCheckStoreIndir(tree->AsStoreInd());
}
#else
const CORINFO_EE_INFO::InlinedCallFrameInfo& callFrameInfo = comp->eeGetEEInfo()->inlinedCallFrameInfo;
// ----------------------------------------------------------------------------------
// InlinedCallFrame.m_pCallerReturnAddress = nullptr
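// Clearing the return address marks the inlined frame as inactive for the stack walker; 32-bit targets do
// this instead of popping the frame after each call.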
GenTreeLclFld* const storeCallSiteTracker =
new (comp, GT_STORE_LCL_FLD) GenTreeLclFld(GT_STORE_LCL_FLD, TYP_I_IMPL, comp->lvaInlinedPInvokeFrameVar,
callFrameInfo.offsetOfReturnAddress);
GenTreeIntCon* const constantZero = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
storeCallSiteTracker->gtOp1 = constantZero;
storeCallSiteTracker->gtFlags |= GTF_VAR_DEF;
BlockRange().InsertBefore(insertionPoint, constantZero, storeCallSiteTracker);
ContainCheckStoreLoc(storeCallSiteTracker);
#endif // TARGET_64BIT
}
//------------------------------------------------------------------------
// LowerNonvirtPinvokeCall: Lower a non-virtual / indirect PInvoke call
//
// Arguments:
// call - The call to lower.
//
// Return Value:
// The lowered call tree.
//
GenTree* Lowering::LowerNonvirtPinvokeCall(GenTreeCall* call)
{
// PInvoke lowering varies depending on the flags passed in by the EE. By default,
// GC transitions are generated inline; if CORJIT_FLAG_USE_PINVOKE_HELPERS is specified,
// GC transitions are instead performed using helper calls. Examples of each case are given
// below. Note that the data structure that is used to store information about a call frame
// containing any P/Invoke calls is initialized in the method prolog (see
// InsertPInvokeMethod{Prolog,Epilog} for details).
//
// Inline transitions:
// InlinedCallFrame inlinedCallFrame;
//
// ...
//
// // Set up frame information
// inlinedCallFrame.callTarget = methodHandle; // stored in m_Datum
// inlinedCallFrame.m_pCallSiteSP = SP; // x86 only
// inlinedCallFrame.m_pCallerReturnAddress = &label; (the address of the instruction immediately following the
// call)
// Thread.m_pFrame = &inlinedCallFrame; (non-IL-stub only)
//
// // Switch the thread's GC mode to preemptive mode
// thread->m_fPreemptiveGCDisabled = 0;
//
// // Call the unmanaged method
// target();
//
// // Switch the thread's GC mode back to cooperative mode
// thread->m_fPreemptiveGCDisabled = 1;
//
// // Rendezvous with a running collection if necessary
// if (g_TrapReturningThreads)
// RareDisablePreemptiveGC();
//
// Transitions using helpers:
//
// OpaqueFrame opaqueFrame;
//
// ...
//
// // Call the JIT_PINVOKE_BEGIN helper
// JIT_PINVOKE_BEGIN(&opaqueFrame);
//
// // Call the unmanaged method
// target();
//
// // Call the JIT_PINVOKE_END helper
// JIT_PINVOKE_END(&opaqueFrame);
//
// Note that the JIT_PINVOKE_{BEGIN,END} helpers currently use the default calling convention for the target
// platform. They may be changed in the future such that they preserve all register values.
GenTree* result = nullptr;
// All code generated by this function must not contain the randomly-inserted NOPs
// that we insert to inhibit JIT spraying in partial trust scenarios.
// The PINVOKE_PROLOG op signals this to the code generator/emitter.
GenTree* prolog = new (comp, GT_NOP) GenTree(GT_PINVOKE_PROLOG, TYP_VOID);
BlockRange().InsertBefore(call, prolog);
bool addPInvokePrologEpilog = !call->IsSuppressGCTransition();
if (addPInvokePrologEpilog)
{
InsertPInvokeCallProlog(call);
}
if (call->gtCallType != CT_INDIRECT)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
CORINFO_METHOD_HANDLE methHnd = call->gtCallMethHnd;
CORINFO_CONST_LOOKUP lookup;
comp->info.compCompHnd->getAddressOfPInvokeTarget(methHnd, &lookup);
void* addr = lookup.addr;
GenTree* addrTree;
switch (lookup.accessType)
{
case IAT_VALUE:
// IsCallTargetInRange always returns true on x64. It wants to use rip-based addressing
// for this call. Unfortunately, in case of pinvokes (+suppressgctransition) to external libs
// (e.g. kernel32.dll) the relative offset is unlikely to fit into int32 and we will have to
// turn fAllowRel32 off globally.
if ((call->IsSuppressGCTransition() && !comp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_PREJIT)) ||
!IsCallTargetInRange(addr))
{
result = AddrGen(addr);
}
else
{
// a direct call within range of hardware relative call instruction
// stash the address for codegen
call->gtDirectCallAddress = addr;
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
}
break;
case IAT_PVALUE:
addrTree = AddrGen(addr);
#ifdef DEBUG
addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
result = Ind(addrTree);
break;
case IAT_PPVALUE:
// ToDo: Expanding an IAT_PPVALUE here loses the opportunity
// to hoist/CSE the first indirection, as it is an invariant load
//
// This case currently occurs today when we make PInvoke calls in crossgen
//
// assert(!"IAT_PPVALUE in Lowering::LowerNonvirtPinvokeCall");
addrTree = AddrGen(addr);
#ifdef DEBUG
addrTree->AsIntCon()->gtTargetHandle = (size_t)methHnd;
#endif
// Double-indirection. Load the address into a register
// and call indirectly through the register
//
result = Ind(Ind(addrTree));
break;
case IAT_RELPVALUE:
unreached();
}
}
if (addPInvokePrologEpilog)
{
InsertPInvokeCallEpilog(call);
}
return result;
}
// Expand the code necessary to calculate the control target.
// Returns: the expression needed to calculate the control target
// May insert embedded statements
GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC);
regNumber thisPtrArgReg = comp->codeGen->genGetThisArgReg(call);
// get a reference to the thisPtr being passed
fgArgTabEntry* argEntry = comp->gtArgEntryByArgNum(call, 0);
assert(argEntry->GetRegNum() == thisPtrArgReg);
assert(argEntry->GetNode()->OperIs(GT_PUTARG_REG));
GenTree* thisPtr = argEntry->GetNode()->AsUnOp()->gtGetOp1();
// If what we are passing as the thisptr is not already a local, make a new local to place it in
// because we will be creating expressions based on it.
unsigned lclNum;
if (thisPtr->OperIsLocal())
{
lclNum = thisPtr->AsLclVarCommon()->GetLclNum();
}
else
{
// Split off the thisPtr and store to a temporary variable.
if (vtableCallTemp == BAD_VAR_NUM)
{
vtableCallTemp = comp->lvaGrabTemp(true DEBUGARG("virtual vtable call"));
}
LIR::Use thisPtrUse(BlockRange(), &(argEntry->GetNode()->AsUnOp()->gtOp1), argEntry->GetNode());
ReplaceWithLclVar(thisPtrUse, vtableCallTemp);
lclNum = vtableCallTemp;
}
// Get hold of the vtable offset (note: this might be expensive)
unsigned vtabOffsOfIndirection;
unsigned vtabOffsAfterIndirection;
bool isRelative;
comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
&vtabOffsAfterIndirection, &isRelative);
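// The VM returns the offset of the first-level chunk indirection, the offset of the slot within that chunk,
// and whether the vtable uses relative pointers.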
// If the thisPtr is a local field, then construct a local field type node
GenTree* local;
if (thisPtr->isLclField())
{
local = new (comp, GT_LCL_FLD)
GenTreeLclFld(GT_LCL_FLD, thisPtr->TypeGet(), lclNum, thisPtr->AsLclFld()->GetLclOffs());
}
else
{
local = new (comp, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, thisPtr->TypeGet(), lclNum);
}
// pointer to virtual table = [REG_CALL_THIS + offs]
GenTree* result = Ind(Offset(local, VPTR_OFFS));
// Get the appropriate vtable chunk
if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
{
if (isRelative)
{
// MethodTable offset is a relative pointer.
//
// Additional temporary variable is used to store virtual table pointer.
// The address of the method is obtained by the following computations:
//
// Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
// vtable-1st-level-indirection):
// tmp = vtab
//
// Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
// result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
//
//
// If relative pointers are also in second level indirection, additional temporary is used:
// tmp1 = vtab
// tmp2 = tmp1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp1 + vtabOffsOfIndirection]
// result = tmp2 + [tmp2]
//
unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp"));
unsigned lclNumTmp2 = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp2"));
GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result);
GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet());
tmpTree = Offset(tmpTree, vtabOffsOfIndirection);
tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false);
GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT);
result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs);
GenTree* base = OffsetByIndexWithScale(result, tmpTree, 1);
GenTree* lclvNodeStore2 = comp->gtNewTempAssign(lclNumTmp2, base);
LIR::Range range = LIR::SeqTree(comp, lclvNodeStore);
JITDUMP("result of obtaining pointer to virtual table:\n");
DISPRANGE(range);
BlockRange().InsertBefore(call, std::move(range));
LIR::Range range2 = LIR::SeqTree(comp, lclvNodeStore2);
ContainCheckIndir(tmpTree->AsIndir());
JITDUMP("result of obtaining pointer to virtual table 2nd level indirection:\n");
DISPRANGE(range2);
BlockRange().InsertAfter(lclvNodeStore, std::move(range2));
result = Ind(comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
result =
comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, result, comp->gtNewLclvNode(lclNumTmp2, result->TypeGet()));
}
else
{
// result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
result = Ind(Offset(result, vtabOffsOfIndirection));
}
}
else
{
assert(!isRelative);
}
// Load the function address
// result = [reg+vtabOffs]
if (!isRelative)
{
result = Ind(Offset(result, vtabOffsAfterIndirection));
}
return result;
}
// Lower stub dispatched virtual calls.
GenTree* Lowering::LowerVirtualStubCall(GenTreeCall* call)
{
assert(call->IsVirtualStub());
// An x86 JIT which uses full stub dispatch must generate only
// the following stub dispatch calls:
//
// (1) isCallRelativeIndirect:
// call dword ptr [rel32] ; FF 15 ---rel32----
// (2) isCallRelative:
// call abc ; E8 ---rel32----
// (3) isCallRegisterIndirect:
// 3-byte nop ;
// call dword ptr [eax] ; FF 10
//
// THIS IS VERY TIGHTLY TIED TO THE PREDICATES IN
// vm\i386\cGenCpu.h, esp. isCallRegisterIndirect.
GenTree* result = nullptr;
// This is code to set up an indirect call to a stub address computed
// via dictionary lookup.
if (call->gtCallType == CT_INDIRECT)
{
// The importer decided we needed a stub call via a computed
// stub dispatch address, i.e. an address which came from a dictionary lookup.
// - The dictionary lookup produces an indirected address, suitable for call
// via "call [VirtualStubParam.reg]"
//
// This combination will only be generated for shared generic code and when
// stub dispatch is active.
// fgMorphArgs will have created trees to pass the address in VirtualStubParam.reg.
// All we have to do here is add an indirection to generate the actual call target.
GenTree* ind = Ind(call->gtCallAddr);
BlockRange().InsertAfter(call->gtCallAddr, ind);
call->gtCallAddr = ind;
ind->gtFlags |= GTF_IND_REQ_ADDR_IN_REG;
ContainCheckIndir(ind->AsIndir());
}
else
{
// Direct stub call.
// Get stub addr. This will return NULL if virtual call stubs are not active
void* stubAddr = call->gtStubCallStubAddr;
noway_assert(stubAddr != nullptr);
// If not CT_INDIRECT, then it should always be relative indir call.
// This is ensured by VM.
noway_assert(call->IsVirtualStubRelativeIndir());
// Direct stub calls, though the stubAddr itself may still need to be
// accessed via an indirection.
GenTree* addr = AddrGen(stubAddr);
// On x86, for tailcall via helper, the JIT_TailCall helper takes the stubAddr as
// the target address, and we set a flag that it's a VSD call. The helper then
// handles any necessary indirection.
if (call->IsTailCallViaJitHelper())
{
result = addr;
}
else
{
bool shouldOptimizeVirtualStubCall = false;
#if defined(TARGET_ARMARCH) || defined(TARGET_AMD64)
// Skip inserting the indirection node to load the address that is already
// computed in the VSD stub arg register as a hidden parameter. Instead during the
// codegen, just load the call target from there.
shouldOptimizeVirtualStubCall = !comp->opts.IsCFGEnabled();
#endif
if (!shouldOptimizeVirtualStubCall)
{
result = Ind(addr);
}
}
}
// TODO-Cleanup: start emitting random NOPS
return result;
}
//------------------------------------------------------------------------
// Lowering::AreSourcesPossiblyModifiedLocals:
// Given two nodes which will be used in an addressing mode (base,
// index), check to see if they are lclVar reads, and if so, walk
// backwards from the use until both reads have been visited to
// determine if they are potentially modified in that range.
//
// Arguments:
// addr - the node that uses the base and index nodes
// base - the base node
// index - the index node
//
// Returns: true if either the base or index may be modified between the
// node and addr.
//
bool Lowering::AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index)
{
assert(addr != nullptr);
SideEffectSet baseSideEffects;
if (base != nullptr)
{
if (base->OperIsLocalRead())
{
baseSideEffects.AddNode(comp, base);
}
else
{
base = nullptr;
}
}
SideEffectSet indexSideEffects;
if (index != nullptr)
{
if (index->OperIsLocalRead())
{
indexSideEffects.AddNode(comp, index);
}
else
{
index = nullptr;
}
}
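// Walk backwards from addr until both reads have been visited, checking every node in between
// for interference with them.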
for (GenTree* cursor = addr;; cursor = cursor->gtPrev)
{
assert(cursor != nullptr);
if (cursor == base)
{
base = nullptr;
}
if (cursor == index)
{
index = nullptr;
}
if ((base == nullptr) && (index == nullptr))
{
return false;
}
m_scratchSideEffects.Clear();
m_scratchSideEffects.AddNode(comp, cursor);
if ((base != nullptr) && m_scratchSideEffects.InterferesWith(baseSideEffects, false))
{
return true;
}
if ((index != nullptr) && m_scratchSideEffects.InterferesWith(indexSideEffects, false))
{
return true;
}
}
}
//------------------------------------------------------------------------
// TryCreateAddrMode: recognize trees which can be implemented using an
// addressing mode and transform them to a GT_LEA
//
// Arguments:
// addr - the use of the address we want to transform
// isContainable - true if this addressing mode can be contained
// parent - the node that consumes the given addr (most likely it's an IND)
//
// Returns:
// true if the address node was changed to a LEA, false otherwise.
//
bool Lowering::TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent)
{
if (!addr->OperIs(GT_ADD) || addr->gtOverflow())
{
#ifdef TARGET_ARM64
if (!addr->OperIs(GT_ADDEX))
{
return false;
}
#else
return false;
#endif
}
#ifdef TARGET_ARM64
if (parent->OperIsIndir() && parent->AsIndir()->IsVolatile() && !varTypeIsGC(addr))
{
// For Arm64 we avoid using LEA for volatile INDs
// because we won't be able to use ldar/stlr
return false;
}
#endif
GenTree* base = nullptr;
GenTree* index = nullptr;
unsigned scale = 0;
ssize_t offset = 0;
bool rev = false;
// Find out if an addressing mode can be constructed
bool doAddrMode = comp->codeGen->genCreateAddrMode(addr, // address
true, // fold
&rev, // reverse ops
&base, // base addr
&index, // index val
&scale, // scaling
&offset); // displacement
var_types targetType = parent->OperIsIndir() ? parent->TypeGet() : TYP_UNDEF;
#ifdef TARGET_ARMARCH
// Multiplier should be a "natural-scale" power of two number which is equal to target's width.
//
// *(ulong*)(data + index * 8); - can be optimized
// *(ulong*)(data + index * 7); - can not be optimized
// *(int*)(data + index * 2); - can not be optimized
//
if ((scale > 0) && (genTypeSize(targetType) != scale))
{
return false;
}
if (((scale | offset) > 0) && parent->OperIsHWIntrinsic())
{
// For now we only support unscaled indices for SIMD loads
return false;
}
#endif
if (scale == 0)
{
scale = 1;
}
if (!isContainable)
{
// this is just a reg-const add
if (index == nullptr)
{
return false;
}
// this is just a reg-reg add
if ((scale == 1) && (offset == 0))
{
return false;
}
}
// make sure there are not any side effects between def of leaves and use
if (!doAddrMode || AreSourcesPossiblyModifiedLocals(addr, base, index))
{
JITDUMP("No addressing mode:\n ");
DISPNODE(addr);
return false;
}
JITDUMP("Addressing mode:\n");
JITDUMP(" Base\n ");
DISPNODE(base);
if (index != nullptr)
{
JITDUMP(" + Index * %u + %d\n ", scale, offset);
DISPNODE(index);
}
else
{
JITDUMP(" + %d\n", offset);
}
// Save the (potentially) unused operands before changing the address to LEA.
ArrayStack<GenTree*> unusedStack(comp->getAllocator(CMK_ArrayStack));
unusedStack.Push(addr->AsOp()->gtGetOp1());
unusedStack.Push(addr->AsOp()->gtGetOp2());
addr->ChangeOper(GT_LEA);
// Make sure there are no leftover side effects (though the existing ADD we're
// changing shouldn't have any at this point, but sometimes it does).
addr->gtFlags &= ~GTF_ALL_EFFECT;
GenTreeAddrMode* addrMode = addr->AsAddrMode();
addrMode->SetBase(base);
addrMode->SetIndex(index);
addrMode->SetScale(scale);
addrMode->SetOffset(static_cast<int>(offset));
// Neither the base nor the index should now be contained.
if (base != nullptr)
{
base->ClearContained();
}
if (index != nullptr)
{
index->ClearContained();
}
// Remove all the nodes that are no longer used.
while (!unusedStack.Empty())
{
GenTree* unused = unusedStack.Pop();
// Use a loop to process some of the nodes iteratively
// instead of pushing them on the stack.
while ((unused != base) && (unused != index))
{
JITDUMP("Removing unused node:\n ");
DISPNODE(unused);
BlockRange().Remove(unused);
if (unused->OperIs(GT_ADD, GT_MUL, GT_LSH))
{
// Push the first operand and loop back to process the second one.
// This minimizes the stack depth because the second one tends to be
// a constant so it gets processed and then the first one gets popped.
unusedStack.Push(unused->AsOp()->gtGetOp1());
unused = unused->AsOp()->gtGetOp2();
}
else
{
assert(unused->OperIs(GT_CNS_INT));
break;
}
}
}
#ifdef TARGET_ARM64
if ((index != nullptr) && index->OperIs(GT_CAST) && (scale == 1) && (offset == 0) && varTypeIsByte(targetType))
{
MakeSrcContained(addrMode, index);
}
// Check if we can "contain" LEA(BFIZ) in order to extend 32bit index to 64bit as part of load/store.
if ((index != nullptr) && index->OperIs(GT_BFIZ) && index->gtGetOp1()->OperIs(GT_CAST) &&
index->gtGetOp2()->IsCnsIntOrI() && (varTypeIsIntegral(targetType) || varTypeIsFloating(targetType)))
{
// BFIZ node is a binary op where op1 is GT_CAST and op2 is GT_CNS_INT
GenTreeCast* cast = index->gtGetOp1()->AsCast();
assert(cast->isContained());
const unsigned shiftBy = (unsigned)index->gtGetOp2()->AsIntCon()->IconValue();
// 'scale' and 'offset' have to be unset since we're going to use [base + index * SXTW/UXTW scale] form
// where there is no room for additional offsets/scales on ARM64. 'shiftBy' has to match target's width.
if (cast->CastOp()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && (genTypeSize(targetType) == (1U << shiftBy)) &&
(scale == 1) && (offset == 0))
{
// TODO: Make sure that genCreateAddrMode marks such BFIZ candidates as GTF_DONT_CSE for better CQ.
MakeSrcContained(addrMode, index);
}
}
#endif
JITDUMP("New addressing mode node:\n ");
DISPNODE(addrMode);
JITDUMP("\n");
return true;
}
//------------------------------------------------------------------------
// LowerAdd: turn this add into a GT_LEA if that would be profitable
//
// Arguments:
// node - the node we care about
//
// Returns:
// nullptr if no transformation was done, or the next node in the transformed node sequence that
// needs to be lowered.
//
GenTree* Lowering::LowerAdd(GenTreeOp* node)
{
if (varTypeIsIntegralOrI(node->TypeGet()))
{
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
LIR::Use use;
// It is not the best place to do such simple arithmetic optimizations,
// but it allows us to avoid `LEA(addr, 0)` nodes and doing that in morph
// requires more changes. Delete that part if we get an expression optimizer.
if (op2->IsIntegralConst(0))
{
JITDUMP("Lower: optimize val + 0: ");
DISPNODE(node);
JITDUMP("Replaced with: ");
DISPNODE(op1);
if (BlockRange().TryGetUse(node, &use))
{
use.ReplaceWith(op1);
}
else
{
op1->SetUnusedValue();
}
GenTree* next = node->gtNext;
BlockRange().Remove(op2);
BlockRange().Remove(node);
JITDUMP("Remove [%06u], [%06u]\n", op2->gtTreeID, node->gtTreeID);
return next;
}
#ifdef TARGET_XARCH
if (BlockRange().TryGetUse(node, &use))
{
// If this is a child of an indir, let the parent handle it.
// If there is a chain of adds, only look at the topmost one.
GenTree* parent = use.User();
if (!parent->OperIsIndir() && !parent->OperIs(GT_ADD))
{
TryCreateAddrMode(node, false, parent);
}
}
#endif // TARGET_XARCH
}
if (node->OperIs(GT_ADD))
{
ContainCheckBinary(node);
}
return nullptr;
}
//------------------------------------------------------------------------
// LowerUnsignedDivOrMod: Lowers a GT_UDIV/GT_UMOD node.
//
// Arguments:
// divMod - pointer to the GT_UDIV/GT_UMOD node to be lowered
//
// Return Value:
// Returns a boolean indicating whether the node was transformed.
//
// Notes:
// - Transform UDIV/UMOD by power of 2 into RSZ/AND
// - Transform UDIV by constant >= 2^(N-1) into GE
// - Transform UDIV/UMOD by constant >= 3 into "magic division"
//
bool Lowering::LowerUnsignedDivOrMod(GenTreeOp* divMod)
{
assert(divMod->OperIs(GT_UDIV, GT_UMOD));
#if defined(USE_HELPERS_FOR_INT_DIV)
if (!varTypeIsIntegral(divMod->TypeGet()))
{
assert(!"unreachable: integral GT_UDIV/GT_UMOD should get morphed into helper calls");
}
assert(varTypeIsFloating(divMod->TypeGet()));
#endif // USE_HELPERS_FOR_INT_DIV
#if defined(TARGET_ARM64)
assert(divMod->OperGet() != GT_UMOD);
#endif // TARGET_ARM64
GenTree* dividend = divMod->gtGetOp1();
GenTree* divisor = divMod->gtGetOp2();
#if !defined(TARGET_64BIT)
if (dividend->OperIs(GT_LONG))
{
return false;
}
#endif
if (!divisor->IsCnsIntOrI())
{
return false;
}
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return false;
}
const var_types type = divMod->TypeGet();
assert((type == TYP_INT) || (type == TYP_I_IMPL));
size_t divisorValue = static_cast<size_t>(divisor->AsIntCon()->IconValue());
if (type == TYP_INT)
{
// Clear up the upper 32 bits of the value, they may be set to 1 because constants
// are treated as signed and stored in ssize_t which is 64 bit in size on 64 bit targets.
divisorValue &= UINT32_MAX;
}
if (divisorValue == 0)
{
return false;
}
const bool isDiv = divMod->OperIs(GT_UDIV);
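// Power-of-two divisors reduce to a shift or a mask, e.g. x / 8 => x >> 3 and x % 8 => x & 7 (unsigned).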
if (isPow2(divisorValue))
{
genTreeOps newOper;
if (isDiv)
{
newOper = GT_RSZ;
divisorValue = genLog2(divisorValue);
}
else
{
newOper = GT_AND;
divisorValue -= 1;
}
divMod->SetOper(newOper);
divisor->AsIntCon()->SetIconValue(divisorValue);
ContainCheckNode(divMod);
return true;
}
if (isDiv)
{
// If the divisor is greater than or equal to 2^(N - 1) then the result is 1
// iff the dividend is greater than or equal to the divisor.
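// For example, with 32-bit operands and divisor 0x80000001, x / 0x80000001 is 1 exactly when x >= 0x80000001.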
if (((type == TYP_INT) && (divisorValue > (UINT32_MAX / 2))) ||
((type == TYP_LONG) && (divisorValue > (UINT64_MAX / 2))))
{
divMod->SetOper(GT_GE);
divMod->gtFlags |= GTF_UNSIGNED;
ContainCheckNode(divMod);
return true;
}
}
// TODO-ARM-CQ: Currently there's no GT_MULHI for ARM32
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
if (!comp->opts.MinOpts() && (divisorValue >= 3))
{
size_t magic;
bool increment;
int preShift;
int postShift;
bool simpleMul = false;
unsigned bits = type == TYP_INT ? 32 : 64;
// if the dividend operand is AND or RSZ with a constant then the number of input bits can be reduced
if (dividend->OperIs(GT_AND) && dividend->gtGetOp2()->IsCnsIntOrI())
{
size_t maskCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
if (maskCns != 0)
{
unsigned maskBits = 1;
while (maskCns >>= 1)
maskBits++;
if (maskBits < bits)
bits = maskBits;
}
}
else if (dividend->OperIs(GT_RSZ) && dividend->gtGetOp2()->IsCnsIntOrI())
{
size_t shiftCns = static_cast<size_t>(dividend->gtGetOp2()->AsIntCon()->IconValue());
if (shiftCns < bits)
{
bits -= static_cast<unsigned>(shiftCns);
}
}
if (type == TYP_INT)
{
magic = MagicDivide::GetUnsigned32Magic(static_cast<uint32_t>(divisorValue), &increment, &preShift,
&postShift, bits);
#ifdef TARGET_64BIT
// avoid inc_saturate/multiple shifts by widening to 32x64 MULHI
if (increment || (preShift
#ifdef TARGET_XARCH
// IMUL reg,reg,imm32 can't be used if magic<0 because of sign-extension
&& static_cast<int32_t>(magic) < 0
#endif
))
{
magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
&postShift, bits);
}
// otherwise just widen to regular multiplication
else
{
postShift += 32;
simpleMul = true;
}
#endif
}
else
{
#ifdef TARGET_64BIT
magic = MagicDivide::GetUnsigned64Magic(static_cast<uint64_t>(divisorValue), &increment, &preShift,
&postShift, bits);
#else
unreached();
#endif
}
assert(divMod->MarkedDivideByConstOptimized());
const bool requiresDividendMultiuse = !isDiv;
const weight_t curBBWeight = m_block->getBBWeight(comp);
if (requiresDividendMultiuse)
{
LIR::Use dividendUse(BlockRange(), &divMod->gtOp1, divMod);
dividend = ReplaceWithLclVar(dividendUse);
}
GenTree* firstNode = nullptr;
GenTree* adjustedDividend = dividend;
#if defined(TARGET_ARM64)
// On ARM64 we will use a 32x32->64 bit multiply instead of a 64x64->64 one.
bool widenToNativeIntForMul = (type != TYP_I_IMPL) && !simpleMul;
#else
CLANG_FORMAT_COMMENT_ANCHOR;
bool widenToNativeIntForMul = (type != TYP_I_IMPL);
#endif
// If "increment" flag is returned by GetUnsignedMagic we need to do Saturating Increment first
if (increment)
{
adjustedDividend = comp->gtNewOperNode(GT_INC_SATURATE, type, adjustedDividend);
BlockRange().InsertBefore(divMod, adjustedDividend);
firstNode = adjustedDividend;
assert(!preShift);
}
// if "preShift" is required, then do a right shift before
else if (preShift)
{
GenTree* preShiftBy = comp->gtNewIconNode(preShift, TYP_INT);
adjustedDividend = comp->gtNewOperNode(GT_RSZ, type, adjustedDividend, preShiftBy);
BlockRange().InsertBefore(divMod, preShiftBy, adjustedDividend);
firstNode = preShiftBy;
}
else if (widenToNativeIntForMul)
{
adjustedDividend = comp->gtNewCastNode(TYP_I_IMPL, adjustedDividend, true, TYP_I_IMPL);
BlockRange().InsertBefore(divMod, adjustedDividend);
firstNode = adjustedDividend;
}
#ifdef TARGET_XARCH
// force input transformation to RAX because the following MULHI will kill RDX:RAX anyway and LSRA often causes
// redundant copies otherwise
if (firstNode && !simpleMul)
{
adjustedDividend->SetRegNum(REG_RAX);
}
#endif
if (widenToNativeIntForMul)
{
divisor->gtType = TYP_I_IMPL;
}
divisor->AsIntCon()->SetIconValue(magic);
if (isDiv && !postShift && (type == TYP_I_IMPL))
{
divMod->SetOper(GT_MULHI);
divMod->gtOp1 = adjustedDividend;
divMod->SetUnsigned();
}
else
{
#ifdef TARGET_ARM64
// 64-bit MUL is more expensive than UMULL on ARM64.
genTreeOps mulOper = simpleMul ? GT_MUL_LONG : GT_MULHI;
#else
// 64-bit IMUL is less expensive than MUL eax:edx on x64.
genTreeOps mulOper = simpleMul ? GT_MUL : GT_MULHI;
#endif
// Insert a new multiplication node before the existing GT_UDIV/GT_UMOD node.
// The existing node will later be transformed into a GT_RSZ/GT_SUB that
// computes the final result. This way we don't need to find and change the use
// of the existing node.
GenTree* mulhi = comp->gtNewOperNode(mulOper, TYP_I_IMPL, adjustedDividend, divisor);
mulhi->SetUnsigned();
BlockRange().InsertBefore(divMod, mulhi);
if (firstNode == nullptr)
{
firstNode = mulhi;
}
if (postShift)
{
GenTree* shiftBy = comp->gtNewIconNode(postShift, TYP_INT);
BlockRange().InsertBefore(divMod, shiftBy);
if (isDiv && (type == TYP_I_IMPL))
{
divMod->SetOper(GT_RSZ);
divMod->gtOp1 = mulhi;
divMod->gtOp2 = shiftBy;
}
else
{
mulhi = comp->gtNewOperNode(GT_RSZ, TYP_I_IMPL, mulhi, shiftBy);
BlockRange().InsertBefore(divMod, mulhi);
}
}
if (!isDiv)
{
// dividend UMOD divisor = dividend SUB (div MUL divisor)
GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
GenTree* mul = comp->gtNewOperNode(GT_MUL, type, mulhi, divisor);
dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
divMod->SetOper(GT_SUB);
divMod->gtOp1 = dividend;
divMod->gtOp2 = mul;
BlockRange().InsertBefore(divMod, divisor, mul, dividend);
}
else if (type != TYP_I_IMPL)
{
#ifdef TARGET_ARMARCH
divMod->SetOper(GT_CAST);
divMod->SetUnsigned();
divMod->AsCast()->gtCastType = TYP_INT;
#else
divMod->SetOper(GT_BITCAST);
#endif
divMod->gtOp1 = mulhi;
divMod->gtOp2 = nullptr;
}
}
if (firstNode != nullptr)
{
ContainCheckRange(firstNode, divMod);
}
return true;
}
#endif
return false;
}
//------------------------------------------------------------------------
// LowerConstIntDivOrMod: Transform integer GT_DIV/GT_MOD nodes with a power of 2
// const divisor into equivalent but faster sequences.
//
// Arguments:
// node - pointer to the DIV or MOD node
//
// Returns:
// nullptr if no transformation is done, or the next node in the transformed node sequence that
// needs to be lowered.
//
GenTree* Lowering::LowerConstIntDivOrMod(GenTree* node)
{
assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
GenTree* divMod = node;
GenTree* dividend = divMod->gtGetOp1();
GenTree* divisor = divMod->gtGetOp2();
const var_types type = divMod->TypeGet();
assert((type == TYP_INT) || (type == TYP_LONG));
#if defined(USE_HELPERS_FOR_INT_DIV)
assert(!"unreachable: integral GT_DIV/GT_MOD should get morphed into helper calls");
#endif // USE_HELPERS_FOR_INT_DIV
#if defined(TARGET_ARM64)
if (divMod->OperIs(GT_MOD) && divisor->IsIntegralConstPow2())
{
return LowerModPow2(node);
}
assert(node->OperGet() != GT_MOD);
#endif // TARGET_ARM64
if (!divisor->IsCnsIntOrI())
{
return nullptr; // no transformations to make
}
if (dividend->IsCnsIntOrI())
{
// We shouldn't see a divmod with constant operands here but if we do then it's likely
// because optimizations are disabled or it's a case that's supposed to throw an exception.
// Don't optimize this.
return nullptr;
}
ssize_t divisorValue = divisor->AsIntCon()->IconValue();
if (divisorValue == -1 || divisorValue == 0)
{
// x / 0 and x % 0 can't be optimized because they are required to throw an exception.
// x / -1 can't be optimized because INT_MIN / -1 is required to throw an exception.
// x % -1 is always 0 and the IL spec says that the rem instruction "can" throw an exception if x is
// the minimum representable integer. However, the C# spec says that an exception "is" thrown in this
// case so optimizing this case would break C# code.
// A runtime check could be used to handle this case but it's probably too rare to matter.
return nullptr;
}
bool isDiv = divMod->OperGet() == GT_DIV;
if (isDiv)
{
if ((type == TYP_INT && divisorValue == INT_MIN) || (type == TYP_LONG && divisorValue == INT64_MIN))
{
// If the divisor is the minimum representable integer value then we can use a compare,
// the result is 1 iff the dividend equals divisor.
divMod->SetOper(GT_EQ);
return node;
}
}
size_t absDivisorValue =
(divisorValue == SSIZE_T_MIN) ? static_cast<size_t>(divisorValue) : static_cast<size_t>(abs(divisorValue));
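// absDivisorValue is the divisor's magnitude: for GT_DIV a negative divisor only negates the result,
// and a truncated GT_MOD result does not depend on the divisor's sign.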
if (!isPow2(absDivisorValue))
{
if (comp->opts.MinOpts())
{
return nullptr;
}
#if defined(TARGET_XARCH) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
ssize_t magic;
int shift;
if (type == TYP_INT)
{
magic = MagicDivide::GetSigned32Magic(static_cast<int32_t>(divisorValue), &shift);
}
else
{
#ifdef TARGET_64BIT
magic = MagicDivide::GetSigned64Magic(static_cast<int64_t>(divisorValue), &shift);
#else // !TARGET_64BIT
unreached();
#endif // !TARGET_64BIT
}
divisor->AsIntConCommon()->SetIconValue(magic);
// Insert a new GT_MULHI node in front of the existing GT_DIV/GT_MOD node.
// The existing node will later be transformed into a GT_ADD/GT_SUB that
// computes the final result. This way we don't need to find and change the
// use of the existing node.
GenTree* mulhi = comp->gtNewOperNode(GT_MULHI, type, divisor, dividend);
BlockRange().InsertBefore(divMod, mulhi);
// mulhi was the easy part. Now we need to generate different code depending
// on the divisor value:
// For 3 we need:
// div = signbit(mulhi) + mulhi
// For 5 we need:
// div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
// For 7 we need:
// mulhi += dividend ; requires add adjust
// div = signbit(mulhi) + sar(mulhi, 2) ; requires shift adjust
// For -3 we need:
// mulhi -= dividend ; requires sub adjust
// div = signbit(mulhi) + sar(mulhi, 1) ; requires shift adjust
bool requiresAddSubAdjust = signum(divisorValue) != signum(magic);
bool requiresShiftAdjust = shift != 0;
bool requiresDividendMultiuse = requiresAddSubAdjust || !isDiv;
if (requiresDividendMultiuse)
{
LIR::Use dividendUse(BlockRange(), &mulhi->AsOp()->gtOp2, mulhi);
dividend = ReplaceWithLclVar(dividendUse);
}
GenTree* adjusted;
if (requiresAddSubAdjust)
{
dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
adjusted = comp->gtNewOperNode(divisorValue > 0 ? GT_ADD : GT_SUB, type, mulhi, dividend);
BlockRange().InsertBefore(divMod, dividend, adjusted);
}
else
{
adjusted = mulhi;
}
GenTree* shiftBy = comp->gtNewIconNode(genTypeSize(type) * 8 - 1, type);
GenTree* signBit = comp->gtNewOperNode(GT_RSZ, type, adjusted, shiftBy);
BlockRange().InsertBefore(divMod, shiftBy, signBit);
LIR::Use adjustedUse(BlockRange(), &signBit->AsOp()->gtOp1, signBit);
adjusted = ReplaceWithLclVar(adjustedUse);
adjusted = comp->gtNewLclvNode(adjusted->AsLclVar()->GetLclNum(), adjusted->TypeGet());
BlockRange().InsertBefore(divMod, adjusted);
if (requiresShiftAdjust)
{
shiftBy = comp->gtNewIconNode(shift, TYP_INT);
adjusted = comp->gtNewOperNode(GT_RSH, type, adjusted, shiftBy);
BlockRange().InsertBefore(divMod, shiftBy, adjusted);
}
if (isDiv)
{
divMod->SetOperRaw(GT_ADD);
divMod->AsOp()->gtOp1 = adjusted;
divMod->AsOp()->gtOp2 = signBit;
}
else
{
GenTree* div = comp->gtNewOperNode(GT_ADD, type, adjusted, signBit);
dividend = comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet());
// dividend % divisor = dividend - divisor x div
GenTree* divisor = comp->gtNewIconNode(divisorValue, type);
GenTree* mul = comp->gtNewOperNode(GT_MUL, type, div, divisor);
BlockRange().InsertBefore(divMod, dividend, div, divisor, mul);
divMod->SetOperRaw(GT_SUB);
divMod->AsOp()->gtOp1 = dividend;
divMod->AsOp()->gtOp2 = mul;
}
return mulhi;
#elif defined(TARGET_ARM)
// Currently there's no GT_MULHI for ARM32
return nullptr;
#else
#error Unsupported or unset target architecture
#endif
}
// We're committed to the conversion now. Go find the use if any.
LIR::Use use;
if (!BlockRange().TryGetUse(node, &use))
{
return nullptr;
}
// We need to use the dividend node multiple times so its value needs to be
// computed once and stored in a temp variable.
LIR::Use opDividend(BlockRange(), &divMod->AsOp()->gtOp1, divMod);
dividend = ReplaceWithLclVar(opDividend);
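// For signed division by +/-2^N, add (2^N - 1) when the dividend is negative so the shift rounds
// toward zero, e.g. -7 / 4: (-7 + 3) >> 2 == -1.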
GenTree* adjustment = comp->gtNewOperNode(GT_RSH, type, dividend, comp->gtNewIconNode(type == TYP_INT ? 31 : 63));
if (absDivisorValue == 2)
{
// If the divisor is +/-2 then we'd end up with a bitwise and between 0/-1 and 1.
// We can get the same result by using GT_RSZ instead of GT_RSH.
adjustment->SetOper(GT_RSZ);
}
else
{
adjustment = comp->gtNewOperNode(GT_AND, type, adjustment, comp->gtNewIconNode(absDivisorValue - 1, type));
}
GenTree* adjustedDividend =
comp->gtNewOperNode(GT_ADD, type, adjustment,
comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()));
GenTree* newDivMod;
if (isDiv)
{
// perform the division by right shifting the adjusted dividend
divisor->AsIntCon()->SetIconValue(genLog2(absDivisorValue));
newDivMod = comp->gtNewOperNode(GT_RSH, type, adjustedDividend, divisor);
ContainCheckShiftRotate(newDivMod->AsOp());
if (divisorValue < 0)
{
// negate the result if the divisor is negative
newDivMod = comp->gtNewOperNode(GT_NEG, type, newDivMod);
ContainCheckNode(newDivMod);
}
}
else
{
// dividend % divisor = dividend - divisor x (dividend / divisor)
// divisor x (dividend / divisor) translates to (dividend >> log2(divisor)) << log2(divisor)
// which simply discards the low log2(divisor) bits, that's just dividend & ~(divisor - 1)
divisor->AsIntCon()->SetIconValue(~(absDivisorValue - 1));
newDivMod = comp->gtNewOperNode(GT_SUB, type,
comp->gtNewLclvNode(dividend->AsLclVar()->GetLclNum(), dividend->TypeGet()),
comp->gtNewOperNode(GT_AND, type, adjustedDividend, divisor));
}
// Remove the divisor and dividend nodes from the linear order,
// since we have reused them and will resequence the tree
BlockRange().Remove(divisor);
BlockRange().Remove(dividend);
// linearize and insert the new tree before the original divMod node
InsertTreeBeforeAndContainCheck(divMod, newDivMod);
BlockRange().Remove(divMod);
// replace the original divmod node with the new divmod tree
use.ReplaceWith(newDivMod);
return newDivMod->gtNext;
}
//------------------------------------------------------------------------
// LowerSignedDivOrMod: transform integer GT_DIV/GT_MOD nodes with a power of 2
// const divisor into equivalent but faster sequences.
//
// Arguments:
// node - the DIV or MOD node
//
// Returns:
// The next node to lower.
//
GenTree* Lowering::LowerSignedDivOrMod(GenTree* node)
{
assert((node->OperGet() == GT_DIV) || (node->OperGet() == GT_MOD));
GenTree* next = node->gtNext;
if (varTypeIsIntegral(node->TypeGet()))
{
// LowerConstIntDivOrMod will return nullptr if it doesn't transform the node.
GenTree* newNode = LowerConstIntDivOrMod(node);
if (newNode != nullptr)
{
return newNode;
}
}
ContainCheckDivOrMod(node->AsOp());
return next;
}
//------------------------------------------------------------------------
// LowerShift: Lower shift nodes
//
// Arguments:
// shift - the shift node (GT_LSH, GT_RSH or GT_RSZ)
//
// Notes:
// Remove unnecessary shift count masking, xarch shift instructions
// mask the shift count to 5 bits (or 6 bits for 64 bit operations).
void Lowering::LowerShift(GenTreeOp* shift)
{
assert(shift->OperIs(GT_LSH, GT_RSH, GT_RSZ));
size_t mask = 0x1f;
#ifdef TARGET_64BIT
if (varTypeIsLong(shift->TypeGet()))
{
mask = 0x3f;
}
#else
assert(!varTypeIsLong(shift->TypeGet()));
#endif
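// e.g. for a 32-bit shift, (x << (count & 31)) can drop the AND because the shift instruction masks
// the count to 5 bits itself.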
for (GenTree* andOp = shift->gtGetOp2(); andOp->OperIs(GT_AND); andOp = andOp->gtGetOp1())
{
GenTree* maskOp = andOp->gtGetOp2();
if (!maskOp->IsCnsIntOrI())
{
break;
}
if ((static_cast<size_t>(maskOp->AsIntCon()->IconValue()) & mask) != mask)
{
break;
}
shift->gtOp2 = andOp->gtGetOp1();
BlockRange().Remove(andOp);
BlockRange().Remove(maskOp);
// The parent was replaced, clear contain and regOpt flag.
shift->gtOp2->ClearContained();
}
ContainCheckShiftRotate(shift);
#ifdef TARGET_ARM64
// Try to recognize ubfiz/sbfiz idiom in LSH(CAST(X), CNS) tree
if (comp->opts.OptimizationEnabled() && shift->OperIs(GT_LSH) && shift->gtGetOp1()->OperIs(GT_CAST) &&
shift->gtGetOp2()->IsCnsIntOrI() && !shift->isContained())
{
GenTreeIntCon* cns = shift->gtGetOp2()->AsIntCon();
GenTreeCast* cast = shift->gtGetOp1()->AsCast();
if (!cast->isContained() && !cast->IsRegOptional() && !cast->gtOverflow() &&
// Smaller CastOp is most likely an IND(X) node which is lowered to a zero-extend load
cast->CastOp()->TypeIs(TYP_LONG, TYP_INT))
{
// Cast is either "TYP_LONG <- TYP_INT" or "TYP_INT <- %SMALL_INT% <- TYP_INT" (signed or unsigned)
unsigned dstBits = genTypeSize(cast) * BITS_PER_BYTE;
unsigned srcBits = varTypeIsSmall(cast->CastToType()) ? genTypeSize(cast->CastToType()) * BITS_PER_BYTE
: genTypeSize(cast->CastOp()) * BITS_PER_BYTE;
assert(!cast->CastOp()->isContained());
// It has to be an upcast and CNS must be in [1..srcBits) range
if ((srcBits < dstBits) && (cns->IconValue() > 0) && (cns->IconValue() < srcBits))
{
JITDUMP("Recognized ubfix/sbfix pattern in LSH(CAST, CNS). Changing op to GT_BFIZ");
shift->ChangeOper(GT_BFIZ);
MakeSrcContained(shift, cast);
}
}
}
#endif
}
void Lowering::WidenSIMD12IfNecessary(GenTreeLclVarCommon* node)
{
#ifdef FEATURE_SIMD
if (node->TypeGet() == TYP_SIMD12)
{
// Assumption 1:
// RyuJit backend depends on the assumption that on 64-Bit targets Vector3 size is rounded off
// to TARGET_POINTER_SIZE and hence Vector3 locals on stack can be treated as TYP_SIMD16 for
// reading and writing purposes.
//
// Assumption 2:
// RyuJit backend is making another implicit assumption that Vector3 type args when passed in
// registers or on stack, the upper most 4-bytes will be zero.
//
// For P/Invoke return and Reverse P/Invoke argument passing, native compiler doesn't guarantee
// that upper 4-bytes of a Vector3 type struct is zero initialized and hence assumption 2 is
// invalid.
//
// RyuJIT x64 Windows: arguments are treated as passed by ref and hence read/written just 12
// bytes. In case of Vector3 returns, Caller allocates a zero initialized Vector3 local and
// passes it retBuf arg and Callee method writes only 12 bytes to retBuf. For this reason,
// there is no need to clear upper 4-bytes of Vector3 type args.
//
// RyuJIT x64 Unix: arguments are treated as passed by value and read/written as if TYP_SIMD16.
// Vector3 return values are returned in two return registers and Caller assembles them into a
// single xmm reg. Hence RyuJIT explicitly generates code to clear the upper 4-bytes of Vector3
// type args in prolog and Vector3 type return value of a call
//
// RyuJIT x86 Windows: all non-param Vector3 local vars are allocated as 16 bytes. Vector3 arguments
// are pushed as 12 bytes. For return values, a 16-byte local is allocated and the address passed
// as a return buffer pointer. The callee doesn't write the high 4 bytes, and we don't need to clear
// it either.
LclVarDsc* varDsc = comp->lvaGetDesc(node->AsLclVarCommon());
if (comp->lvaMapSimd12ToSimd16(varDsc))
{
JITDUMP("Mapping TYP_SIMD12 lclvar node to TYP_SIMD16:\n");
DISPNODE(node);
JITDUMP("============");
node->gtType = TYP_SIMD16;
}
}
#endif // FEATURE_SIMD
}
//------------------------------------------------------------------------
// LowerArrElem: Lower a GT_ARR_ELEM node
//
// Arguments:
// node - the GT_ARR_ELEM node to lower.
//
// Return Value:
// The next node to lower.
//
// Assumptions:
// pTree points to a pointer to a GT_ARR_ELEM node.
//
// Notes:
// This performs the following lowering. We start with a node of the form:
// /--* <arrObj>
// +--* <index0>
// +--* <index1>
// /--* arrMD&[,]
//
// First, we create temps for arrObj if it is not already a lclVar, and for any of the index
// expressions that have side-effects.
// We then transform the tree into:
// <offset is null - no accumulated offset for the first index>
// /--* <arrObj>
// +--* <index0>
// /--* ArrIndex[i, ]
// +--* <arrObj>
// /--| arrOffs[i, ]
// | +--* <arrObj>
// | +--* <index1>
// +--* ArrIndex[*,j]
// +--* <arrObj>
// /--| arrOffs[*,j]
// +--* lclVar NewTemp
// /--* lea (scale = element size, offset = offset of first element)
//
// The new stmtExpr may be omitted if the <arrObj> is a lclVar.
// The new stmtExpr may be embedded if the <arrObj> is not the first tree in linear order for
// the statement containing the original arrMD.
// Note that the arrMDOffs is the INDEX of the lea, but is evaluated before the BASE (which is the second
// reference to NewTemp), because that provides more accurate lifetimes.
// There may be 1, 2 or 3 dimensions, with 1, 2 or 3 arrMDIdx nodes, respectively.
//
GenTree* Lowering::LowerArrElem(GenTree* node)
{
// This will assert if we don't have an ArrElem node
GenTreeArrElem* arrElem = node->AsArrElem();
const unsigned char rank = arrElem->gtArrRank;
JITDUMP("Lowering ArrElem\n");
JITDUMP("============\n");
DISPTREERANGE(BlockRange(), arrElem);
JITDUMP("\n");
assert(arrElem->gtArrObj->TypeGet() == TYP_REF);
// We need to have the array object in a lclVar.
if (!arrElem->gtArrObj->IsLocal())
{
LIR::Use arrObjUse(BlockRange(), &arrElem->gtArrObj, arrElem);
ReplaceWithLclVar(arrObjUse);
}
GenTree* arrObjNode = arrElem->gtArrObj;
assert(arrObjNode->IsLocal());
GenTree* insertionPoint = arrElem;
// The first ArrOffs node will have 0 for the offset of the previous dimension.
GenTree* prevArrOffs = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, 0);
BlockRange().InsertBefore(insertionPoint, prevArrOffs);
GenTree* nextToLower = prevArrOffs;
for (unsigned char dim = 0; dim < rank; dim++)
{
GenTree* indexNode = arrElem->gtArrInds[dim];
// Use the original arrObjNode on the 0th ArrIndex node, and clone it for subsequent ones.
GenTree* idxArrObjNode;
if (dim == 0)
{
idxArrObjNode = arrObjNode;
}
else
{
idxArrObjNode = comp->gtClone(arrObjNode);
BlockRange().InsertBefore(insertionPoint, idxArrObjNode);
}
// Next comes the GT_ARR_INDEX node.
GenTreeArrIndex* arrMDIdx = new (comp, GT_ARR_INDEX)
GenTreeArrIndex(TYP_INT, idxArrObjNode, indexNode, dim, rank, arrElem->gtArrElemType);
arrMDIdx->gtFlags |= ((idxArrObjNode->gtFlags | indexNode->gtFlags) & GTF_ALL_EFFECT);
BlockRange().InsertBefore(insertionPoint, arrMDIdx);
GenTree* offsArrObjNode = comp->gtClone(arrObjNode);
BlockRange().InsertBefore(insertionPoint, offsArrObjNode);
GenTreeArrOffs* arrOffs = new (comp, GT_ARR_OFFSET)
GenTreeArrOffs(TYP_I_IMPL, prevArrOffs, arrMDIdx, offsArrObjNode, dim, rank, arrElem->gtArrElemType);
arrOffs->gtFlags |= ((prevArrOffs->gtFlags | arrMDIdx->gtFlags | offsArrObjNode->gtFlags) & GTF_ALL_EFFECT);
BlockRange().InsertBefore(insertionPoint, arrOffs);
prevArrOffs = arrOffs;
}
// Generate the LEA and make it reverse evaluation, because we want to evaluate the index expression before the
// base.
unsigned scale = arrElem->gtArrElemSize;
unsigned offset = comp->eeGetMDArrayDataOffset(arrElem->gtArrRank);
GenTree* leaIndexNode = prevArrOffs;
if (!jitIsScaleIndexMul(scale))
{
// We do the address arithmetic in TYP_I_IMPL, though note that the lower bounds and lengths in memory are
// TYP_INT
GenTree* scaleNode = new (comp, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, scale);
GenTree* mulNode = new (comp, GT_MUL) GenTreeOp(GT_MUL, TYP_I_IMPL, leaIndexNode, scaleNode);
BlockRange().InsertBefore(insertionPoint, scaleNode, mulNode);
leaIndexNode = mulNode;
scale = 1;
}
GenTree* leaBase = comp->gtClone(arrObjNode);
BlockRange().InsertBefore(insertionPoint, leaBase);
GenTree* leaNode = new (comp, GT_LEA) GenTreeAddrMode(arrElem->TypeGet(), leaBase, leaIndexNode, scale, offset);
BlockRange().InsertBefore(insertionPoint, leaNode);
LIR::Use arrElemUse;
if (BlockRange().TryGetUse(arrElem, &arrElemUse))
{
arrElemUse.ReplaceWith(leaNode);
}
else
{
leaNode->SetUnusedValue();
}
BlockRange().Remove(arrElem);
JITDUMP("Results of lowering ArrElem:\n");
DISPTREERANGE(BlockRange(), leaNode);
JITDUMP("\n\n");
return nextToLower;
}
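// Illustrative sketch (not part of the JIT): the effective address that the final LEA above
// computes for a rank-2 MD array element [i, j]. The names below are hypothetical;
// 'dataOffset' stands in for eeGetMDArrayDataOffset(rank) and 'len1' for the length of the
// second dimension that the ArrIndex/ArrOffs chain folds into the accumulated offset.
static unsigned char* MDArrayElemAddrSketch(
    unsigned char* arrObj, unsigned dataOffset, unsigned len1, unsigned i, unsigned j, unsigned elemSize)
{
    unsigned flatIndex = (i * len1) + j;                 // accumulated by the ArrIndex/ArrOffs nodes
    return arrObj + dataOffset + (flatIndex * elemSize); // base + offset + index * scale, i.e. the LEA
}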
PhaseStatus Lowering::DoPhase()
{
// If we have any PInvoke calls, insert the one-time prolog code. We'll insert the epilog code in the
// appropriate spots later. NOTE: there is a minor optimization opportunity here, as we still create p/invoke
// data structures and setup/teardown even if we've eliminated all p/invoke calls due to dead code elimination.
if (comp->compMethodRequiresPInvokeFrame())
{
InsertPInvokeMethodProlog();
}
#if !defined(TARGET_64BIT)
DecomposeLongs decomp(comp); // Initialize the long decomposition class.
if (comp->compLongUsed)
{
decomp.PrepareForDecomposition();
}
#endif // !defined(TARGET_64BIT)
if (!comp->compEnregLocals())
{
// Lowering is checking if lvDoNotEnregister is already set for contained optimizations.
// If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`)
// then we already know that we won't enregister any locals and it is better to set
// `lvDoNotEnregister` flag before we start reading it.
// The main reason why this flag is not set is that we are running in minOpts.
comp->lvSetMinOptsDoNotEnreg();
}
for (BasicBlock* const block : comp->Blocks())
{
/* Make the block publicly available */
comp->compCurBB = block;
#if !defined(TARGET_64BIT)
if (comp->compLongUsed)
{
decomp.DecomposeBlock(block);
}
#endif //! TARGET_64BIT
LowerBlock(block);
}
#ifdef DEBUG
JITDUMP("Lower has completed modifying nodes.\n");
if (VERBOSE)
{
comp->fgDispBasicBlocks(true);
}
#endif
// Recompute local var ref counts before potentially sorting for liveness.
// Note this does minimal work in cases where we are not going to sort.
const bool isRecompute = true;
const bool setSlotNumbers = false;
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
comp->fgLocalVarLiveness();
// local var liveness can delete code, which may create empty blocks
if (comp->opts.OptimizationEnabled())
{
comp->optLoopsMarked = false;
bool modified = comp->fgUpdateFlowGraph();
if (modified)
{
JITDUMP("had to run another liveness pass:\n");
comp->fgLocalVarLiveness();
}
}
// Recompute local var ref counts again after liveness to reflect
// impact of any dead code removal. Note this may leave us with
// tracked vars that have zero refs.
comp->lvaComputeRefCounts(isRecompute, setSlotNumbers);
return PhaseStatus::MODIFIED_EVERYTHING;
}
#ifdef DEBUG
//------------------------------------------------------------------------
// Lowering::CheckCallArg: check that a call argument is in an expected
// form after lowering.
//
// Arguments:
// arg - the argument to check.
//
void Lowering::CheckCallArg(GenTree* arg)
{
if (!arg->IsValue() && !arg->OperIsPutArgStk())
{
assert((arg->OperIsStore() && !arg->IsValue()) || arg->IsArgPlaceHolderNode() || arg->IsNothingNode() ||
arg->OperIsCopyBlkOp());
return;
}
switch (arg->OperGet())
{
case GT_FIELD_LIST:
{
GenTreeFieldList* list = arg->AsFieldList();
assert(list->isContained());
for (GenTreeFieldList::Use& use : list->Uses())
{
assert(use.GetNode()->OperIsPutArg());
}
}
break;
default:
assert(arg->OperIsPutArg());
break;
}
}
//------------------------------------------------------------------------
// Lowering::CheckCall: check that a call is in an expected form after
// lowering. Currently this amounts to checking its
// arguments, but could be expanded to verify more
// properties in the future.
//
// Arguments:
// call - the call to check.
//
void Lowering::CheckCall(GenTreeCall* call)
{
if (call->gtCallThisArg != nullptr)
{
CheckCallArg(call->gtCallThisArg->GetNode());
}
for (GenTreeCall::Use& use : call->Args())
{
CheckCallArg(use.GetNode());
}
for (GenTreeCall::Use& use : call->LateArgs())
{
CheckCallArg(use.GetNode());
}
}
//------------------------------------------------------------------------
// Lowering::CheckNode: check that an LIR node is in an expected form
// after lowering.
//
// Arguments:
// compiler - the compiler context.
// node - the node to check.
//
void Lowering::CheckNode(Compiler* compiler, GenTree* node)
{
switch (node->OperGet())
{
case GT_CALL:
CheckCall(node->AsCall());
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
case GT_HWINTRINSIC:
assert(node->TypeGet() != TYP_SIMD12);
break;
#endif // FEATURE_SIMD
case GT_LCL_VAR:
case GT_STORE_LCL_VAR:
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclVar());
#if defined(FEATURE_SIMD) && defined(TARGET_64BIT)
if (node->TypeIs(TYP_SIMD12))
{
assert(compiler->lvaIsFieldOfDependentlyPromotedStruct(varDsc) || (varDsc->lvSize() == 12));
}
#endif // FEATURE_SIMD && TARGET_64BIT
if (varDsc->lvPromoted)
{
assert(varDsc->lvDoNotEnregister || varDsc->lvIsMultiRegRet);
}
}
break;
case GT_LCL_VAR_ADDR:
case GT_LCL_FLD_ADDR:
{
const GenTreeLclVarCommon* lclVarAddr = node->AsLclVarCommon();
const LclVarDsc* varDsc = compiler->lvaGetDesc(lclVarAddr);
if (((lclVarAddr->gtFlags & GTF_VAR_DEF) != 0) && varDsc->HasGCPtr())
{
// Emitter does not correctly handle live updates for LCL_VAR_ADDR
// when they are not contained, for example, `STOREIND byref(GT_LCL_VAR_ADDR not-contained)`
// would generate:
// add r1, sp, 48 // r1 contains address of a lclVar V01.
// str r0, [r1] // a gc ref becomes live in V01, but emitter would not report it.
// Make sure that we use uncontained address nodes only for variables
// that will be marked as mustInit and will be alive throughout the whole block even when tracked.
assert(lclVarAddr->isContained() || !varDsc->lvTracked || varTypeIsStruct(varDsc));
// TODO: support this assert for uses, see https://github.com/dotnet/runtime/issues/51900.
}
assert(varDsc->lvDoNotEnregister);
break;
}
case GT_PHI:
case GT_PHI_ARG:
assert(!"Should not see phi nodes after rationalize");
break;
case GT_LCL_FLD:
case GT_STORE_LCL_FLD:
{
const LclVarDsc* varDsc = compiler->lvaGetDesc(node->AsLclFld());
assert(varDsc->lvDoNotEnregister);
}
break;
default:
break;
}
}
//------------------------------------------------------------------------
// Lowering::CheckBlock: check that the contents of an LIR block are in an
// expected form after lowering.
//
// Arguments:
// compiler - the compiler context.
// block - the block to check.
//
bool Lowering::CheckBlock(Compiler* compiler, BasicBlock* block)
{
assert(block->isEmpty() || block->IsLIR());
LIR::Range& blockRange = LIR::AsRange(block);
for (GenTree* node : blockRange)
{
CheckNode(compiler, node);
}
assert(blockRange.CheckLIR(compiler, true));
return true;
}
#endif
//------------------------------------------------------------------------
// Lowering::LowerBlock: Lower all the nodes in a BasicBlock
//
// Arguments:
// block - the block to lower.
//
void Lowering::LowerBlock(BasicBlock* block)
{
assert(block == comp->compCurBB); // compCurBB must already be set.
assert(block->isEmpty() || block->IsLIR());
m_block = block;
// NOTE: some of the lowering methods insert calls before the node being
// lowered (See e.g. InsertPInvoke{Method,Call}{Prolog,Epilog}). In
// general, any code that is inserted before the current node should be
// "pre-lowered" as they won't be subject to further processing.
// Lowering::CheckBlock() runs some extra checks on call arguments in
// order to help catch unlowered nodes.
GenTree* node = BlockRange().FirstNode();
while (node != nullptr)
{
node = LowerNode(node);
}
assert(CheckBlock(comp, block));
}
/** Verifies if both of these trees represent the same indirection.
* Used by Lower to annotate whether CodeGen can generate an instruction of the
* form *addrMode BinOp= expr
*
* Preconditions: both trees are children of GT_INDs and their underlying children
* have the same gtOper.
*
* This is a first iteration to actually recognize trees that can be code-generated
* as a single read-modify-write instruction on AMD64/x86. For now
* this method only supports the recognition of simple addressing modes (through GT_LEA)
* or local var indirections. Local fields, array access and other more complex nodes are
* not yet supported.
*
* TODO-CQ: Perform tree recognition by using the Value Numbering Package, that way we can recognize
* arbitrary complex trees and support much more addressing patterns.
*/
bool Lowering::IndirsAreEquivalent(GenTree* candidate, GenTree* storeInd)
{
assert(candidate->OperGet() == GT_IND);
assert(storeInd->OperGet() == GT_STOREIND);
// We should check the size of the indirections. If they are
// different, say because of a cast, then we can't call them equivalent. Doing so could cause us
// to drop a cast.
// Signed-ness difference is okay and expected since a store indirection must always
// be signed based on the CIL spec, but a load could be unsigned.
if (genTypeSize(candidate->gtType) != genTypeSize(storeInd->gtType))
{
return false;
}
GenTree* pTreeA = candidate->gtGetOp1();
GenTree* pTreeB = storeInd->gtGetOp1();
// This method will be called by codegen (as well as during lowering).
// After register allocation, the sources may have been spilled and reloaded
// to a different register, indicated by an inserted GT_RELOAD node.
pTreeA = pTreeA->gtSkipReloadOrCopy();
pTreeB = pTreeB->gtSkipReloadOrCopy();
genTreeOps oper;
if (pTreeA->OperGet() != pTreeB->OperGet())
{
return false;
}
oper = pTreeA->OperGet();
switch (oper)
{
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
case GT_CLS_VAR_ADDR:
case GT_CNS_INT:
return NodesAreEquivalentLeaves(pTreeA, pTreeB);
case GT_LEA:
{
GenTreeAddrMode* gtAddr1 = pTreeA->AsAddrMode();
GenTreeAddrMode* gtAddr2 = pTreeB->AsAddrMode();
return NodesAreEquivalentLeaves(gtAddr1->Base(), gtAddr2->Base()) &&
NodesAreEquivalentLeaves(gtAddr1->Index(), gtAddr2->Index()) &&
(gtAddr1->gtScale == gtAddr2->gtScale) && (gtAddr1->Offset() == gtAddr2->Offset());
}
default:
// We don't handle anything that is not either a constant,
// a local var or LEA.
return false;
}
}
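// Illustrative sketch (not part of the JIT): the source-level shape that the equivalence check
// above enables. When the load and the store go through equivalent indirections, x86/x64 can
// encode the whole update as a single read-modify-write instruction such as
// "add dword ptr [mem], 1" instead of a separate load, add and store.
static void ReadModifyWriteSketch(int* p)
{
    *p += 1; // STOREIND(addr, IND(addr) + 1) with matching addresses -> one RMW instruction
}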
//------------------------------------------------------------------------
// NodesAreEquivalentLeaves: Check whether the two given nodes are the same leaves.
//
// Arguments:
// tree1 and tree2 are nodes to be checked.
// Return Value:
// Returns true if they are same leaves, false otherwise.
//
// static
bool Lowering::NodesAreEquivalentLeaves(GenTree* tree1, GenTree* tree2)
{
if (tree1 == tree2)
{
return true;
}
if (tree1 == nullptr || tree2 == nullptr)
{
return false;
}
tree1 = tree1->gtSkipReloadOrCopy();
tree2 = tree2->gtSkipReloadOrCopy();
if (tree1->TypeGet() != tree2->TypeGet())
{
return false;
}
if (tree1->OperGet() != tree2->OperGet())
{
return false;
}
if (!tree1->OperIsLeaf() || !tree2->OperIsLeaf())
{
return false;
}
switch (tree1->OperGet())
{
case GT_CNS_INT:
return tree1->AsIntCon()->IconValue() == tree2->AsIntCon()->IconValue() &&
tree1->IsIconHandle() == tree2->IsIconHandle();
case GT_LCL_VAR:
case GT_LCL_VAR_ADDR:
return tree1->AsLclVarCommon()->GetLclNum() == tree2->AsLclVarCommon()->GetLclNum();
case GT_CLS_VAR_ADDR:
return tree1->AsClsVar()->gtClsVarHnd == tree2->AsClsVar()->gtClsVarHnd;
default:
return false;
}
}
//------------------------------------------------------------------------
// Lowering::CheckMultiRegLclVar: Check whether a MultiReg GT_LCL_VAR node can
// remain a multi-reg.
//
// Arguments:
// lclNode - the GT_LCL_VAR or GT_STORE_LCL_VAR node.
// retTypeDesc - a return type descriptor either for a call source of a store of
// the local, or for the GT_RETURN consumer of the local.
//
// Notes:
// If retTypeDesc is non-null, this method will check that the fields are compatible.
// Otherwise, it will only check that the lclVar is independently promoted
// (i.e. it is marked lvPromoted and not lvDoNotEnregister).
//
bool Lowering::CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc)
{
bool canEnregister = false;
#if FEATURE_MULTIREG_RET
LclVarDsc* varDsc = comp->lvaGetDesc(lclNode->GetLclNum());
if ((comp->lvaEnregMultiRegVars) && varDsc->lvPromoted)
{
// We can enregister if we have a promoted struct and all the fields' types match the ABI requirements.
// Note that we don't promote structs with explicit layout, so we don't need to check field offsets, and
// if we have multiple types packed into a single register, we won't have matching reg and field counts,
// so we can tolerate mismatches of integer size.
if (varDsc->lvPromoted && (comp->lvaGetPromotionType(varDsc) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
// If we have no retTypeDesc, we only care that it is independently promoted.
if (retTypeDesc == nullptr)
{
canEnregister = true;
}
else
{
unsigned regCount = retTypeDesc->GetReturnRegCount();
if (regCount == varDsc->lvFieldCnt)
{
canEnregister = true;
}
}
}
}
#ifdef TARGET_XARCH
// For local stores on XARCH we only handle mismatched src/dest register count for
// calls of SIMD type. If the source was another lclVar similarly promoted, we would
// have broken it into multiple stores.
if (lclNode->OperIs(GT_STORE_LCL_VAR) && !lclNode->gtGetOp1()->OperIs(GT_CALL))
{
canEnregister = false;
}
#endif // TARGET_XARCH
if (canEnregister)
{
lclNode->SetMultiReg();
}
else
{
lclNode->ClearMultiReg();
if (varDsc->lvPromoted && !varDsc->lvDoNotEnregister)
{
comp->lvaSetVarDoNotEnregister(lclNode->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOp));
}
}
#endif
return canEnregister;
}
//------------------------------------------------------------------------
// Containment Analysis
//------------------------------------------------------------------------
void Lowering::ContainCheckNode(GenTree* node)
{
switch (node->gtOper)
{
case GT_STORE_LCL_VAR:
case GT_STORE_LCL_FLD:
ContainCheckStoreLoc(node->AsLclVarCommon());
break;
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_TEST_EQ:
case GT_TEST_NE:
case GT_CMP:
case GT_JCMP:
ContainCheckCompare(node->AsOp());
break;
case GT_JTRUE:
ContainCheckJTrue(node->AsOp());
break;
case GT_ADD:
case GT_SUB:
#if !defined(TARGET_64BIT)
case GT_ADD_LO:
case GT_ADD_HI:
case GT_SUB_LO:
case GT_SUB_HI:
#endif
case GT_AND:
case GT_OR:
case GT_XOR:
ContainCheckBinary(node->AsOp());
break;
#if defined(TARGET_X86)
case GT_MUL_LONG:
#endif
case GT_MUL:
case GT_MULHI:
ContainCheckMul(node->AsOp());
break;
case GT_DIV:
case GT_MOD:
case GT_UDIV:
case GT_UMOD:
ContainCheckDivOrMod(node->AsOp());
break;
case GT_LSH:
case GT_RSH:
case GT_RSZ:
case GT_ROL:
case GT_ROR:
#ifndef TARGET_64BIT
case GT_LSH_HI:
case GT_RSH_LO:
#endif
ContainCheckShiftRotate(node->AsOp());
break;
case GT_ARR_OFFSET:
ContainCheckArrOffset(node->AsArrOffs());
break;
case GT_LCLHEAP:
ContainCheckLclHeap(node->AsOp());
break;
case GT_RETURN:
ContainCheckRet(node->AsOp());
break;
case GT_RETURNTRAP:
ContainCheckReturnTrap(node->AsOp());
break;
case GT_STOREIND:
ContainCheckStoreIndir(node->AsStoreInd());
break;
case GT_IND:
ContainCheckIndir(node->AsIndir());
break;
case GT_PUTARG_REG:
case GT_PUTARG_STK:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
// The regNum must have been set by the lowering of the call.
assert(node->GetRegNum() != REG_NA);
break;
#ifdef TARGET_XARCH
case GT_INTRINSIC:
ContainCheckIntrinsic(node->AsOp());
break;
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
case GT_SIMD:
ContainCheckSIMD(node->AsSIMD());
break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
ContainCheckHWIntrinsic(node->AsHWIntrinsic());
break;
#endif // FEATURE_HW_INTRINSICS
default:
break;
}
}
//------------------------------------------------------------------------
// ContainCheckReturnTrap: determine whether the source of a RETURNTRAP should be contained.
//
// Arguments:
// node - pointer to the GT_RETURNTRAP node
//
void Lowering::ContainCheckReturnTrap(GenTreeOp* node)
{
#ifdef TARGET_XARCH
assert(node->OperIs(GT_RETURNTRAP));
// This just turns into a compare of its child with an int + a conditional call
if (node->gtOp1->isIndir())
{
MakeSrcContained(node, node->gtOp1);
}
#endif // TARGET_XARCH
}
//------------------------------------------------------------------------
// ContainCheckArrOffset: determine whether the source of an ARR_OFFSET should be contained.
//
// Arguments:
// node - pointer to the GT_ARR_OFFSET node
//
void Lowering::ContainCheckArrOffset(GenTreeArrOffs* node)
{
assert(node->OperIs(GT_ARR_OFFSET));
// we don't want to generate code for this
if (node->gtOffset->IsIntegralConst(0))
{
MakeSrcContained(node, node->AsArrOffs()->gtOffset);
}
}
//------------------------------------------------------------------------
// ContainCheckLclHeap: determine whether the source of a GT_LCLHEAP node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckLclHeap(GenTreeOp* node)
{
assert(node->OperIs(GT_LCLHEAP));
GenTree* size = node->AsOp()->gtOp1;
if (size->IsCnsIntOrI())
{
MakeSrcContained(node, size);
}
}
//------------------------------------------------------------------------
// ContainCheckRet: determine whether the source of a GT_RETURN node should be contained.
//
// Arguments:
// ret - pointer to the GT_RETURN node
//
void Lowering::ContainCheckRet(GenTreeUnOp* ret)
{
assert(ret->OperIs(GT_RETURN));
#if !defined(TARGET_64BIT)
if (ret->TypeGet() == TYP_LONG)
{
GenTree* op1 = ret->gtGetOp1();
noway_assert(op1->OperGet() == GT_LONG);
MakeSrcContained(ret, op1);
}
#endif // !defined(TARGET_64BIT)
#if FEATURE_MULTIREG_RET
if (ret->TypeIs(TYP_STRUCT))
{
GenTree* op1 = ret->gtGetOp1();
// op1 must be either a lclvar or a multi-reg returning call
if (op1->OperGet() == GT_LCL_VAR)
{
const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVarCommon());
// This must be a multi-reg return or an HFA of a single element.
assert(varDsc->lvIsMultiRegRet || (varDsc->lvIsHfa() && varTypeIsValidHfaType(varDsc->lvType)));
// Mark var as contained if not enregisterable.
if (!varDsc->IsEnregisterableLcl())
{
if (!op1->IsMultiRegLclVar())
{
MakeSrcContained(ret, op1);
}
}
}
}
#endif // FEATURE_MULTIREG_RET
}
//------------------------------------------------------------------------
// ContainCheckJTrue: determine whether the source of a JTRUE should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckJTrue(GenTreeOp* node)
{
// The compare does not need to be generated into a register.
GenTree* cmp = node->gtGetOp1();
cmp->gtType = TYP_VOID;
cmp->gtFlags |= GTF_SET_FLAGS;
}
//------------------------------------------------------------------------
// ContainCheckBitCast: determine whether the source of a BITCAST should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckBitCast(GenTree* node)
{
GenTree* const op1 = node->AsOp()->gtOp1;
if (op1->isMemoryOp())
{
op1->SetContained();
}
else if (op1->OperIs(GT_LCL_VAR))
{
if (!m_lsra->willEnregisterLocalVars())
{
op1->SetContained();
}
const LclVarDsc* varDsc = comp->lvaGetDesc(op1->AsLclVar());
// TODO-Cleanup: we want to check if the local is already known not
// to be on reg, for example, because local enreg is disabled.
if (varDsc->lvDoNotEnregister)
{
op1->SetContained();
}
else
{
op1->SetRegOptional();
}
}
else if (op1->IsLocal())
{
op1->SetContained();
}
}
//------------------------------------------------------------------------
// LowerStoreIndirCommon: a common logic to lower StoreIndir.
//
// Arguments:
// ind - the store indirection node we are lowering.
//
void Lowering::LowerStoreIndirCommon(GenTreeStoreInd* ind)
{
assert(ind->TypeGet() != TYP_STRUCT);
#if defined(TARGET_ARM64)
// Verify containment safety before creating an LEA that must be contained.
//
const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
const bool isContainable = true;
#endif
TryCreateAddrMode(ind->Addr(), isContainable, ind);
if (!comp->codeGen->gcInfo.gcIsWriteBarrierStoreIndNode(ind))
{
if (varTypeIsFloating(ind) && ind->Data()->IsCnsFltOrDbl())
{
// Optimize *x = DCON to *x = ICON which can be slightly faster and/or smaller.
GenTree* data = ind->Data();
double dblCns = data->AsDblCon()->gtDconVal;
ssize_t intCns = 0;
var_types type = TYP_UNKNOWN;
// XARCH: we can always contain the immediates.
// ARM64: zero can always be contained, other cases will use immediates from the data
// section and it is not a clear win to switch them to inline integers.
// ARM: FP constants are assembled from integral ones, so it is always profitable
// to directly use the integers as it avoids the int -> float conversion.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_XARCH) || defined(TARGET_ARM)
bool shouldSwitchToInteger = true;
#else // TARGET_ARM64
bool shouldSwitchToInteger = !data->IsCnsNonZeroFltOrDbl();
#endif
if (shouldSwitchToInteger)
{
if (ind->TypeIs(TYP_FLOAT))
{
float fltCns = static_cast<float>(dblCns); // should be a safe round-trip
intCns = static_cast<ssize_t>(*reinterpret_cast<INT32*>(&fltCns));
type = TYP_INT;
}
#ifdef TARGET_64BIT
else
{
assert(ind->TypeIs(TYP_DOUBLE));
intCns = static_cast<ssize_t>(*reinterpret_cast<INT64*>(&dblCns));
type = TYP_LONG;
}
#endif
}
if (type != TYP_UNKNOWN)
{
data->BashToConst(intCns, type);
ind->ChangeType(type);
}
}
LowerStoreIndir(ind);
}
}
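// Illustrative sketch (not part of the JIT): the DCON -> ICON rewrite above stores a floating
// point constant through its raw bit pattern. The hypothetical helper below shows the same
// reinterpretation for a 4-byte float using a well-defined byte-wise copy.
static int FloatBitsAsIntSketch(float value)
{
    int bits = 0;
    const unsigned char* src = reinterpret_cast<const unsigned char*>(&value);
    unsigned char* dst = reinterpret_cast<unsigned char*>(&bits);
    for (int i = 0; i < 4; i++)
    {
        dst[i] = src[i]; // e.g. 1.0f becomes 0x3F800000, which a plain integer store can write
    }
    return bits;
}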
//------------------------------------------------------------------------
// LowerIndir: a common logic to lower IND load or NullCheck.
//
// Arguments:
// ind - the ind node we are lowering.
//
void Lowering::LowerIndir(GenTreeIndir* ind)
{
assert(ind->OperIs(GT_IND, GT_NULLCHECK));
// Process struct typed indirs separately unless they are unused;
// they only appear as the source of a block copy operation or a return node.
if (!ind->TypeIs(TYP_STRUCT) || ind->IsUnusedValue())
{
// TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects
// address containment in some cases so we end up creating trivial (reg + offset)
// or (reg + reg) LEAs that are not necessary.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM64)
// Verify containment safety before creating an LEA that must be contained.
//
const bool isContainable = IsSafeToContainMem(ind, ind->Addr());
#else
const bool isContainable = true;
#endif
TryCreateAddrMode(ind->Addr(), isContainable, ind);
ContainCheckIndir(ind);
if (ind->OperIs(GT_NULLCHECK) || ind->IsUnusedValue())
{
TransformUnusedIndirection(ind, comp, m_block);
}
}
else
{
// If the `ADDR` node under `STORE_OBJ(dstAddr, IND(struct(ADDR))`
// is a complex one it could benefit from an `LEA` that is not contained.
const bool isContainable = false;
TryCreateAddrMode(ind->Addr(), isContainable, ind);
}
}
//------------------------------------------------------------------------
// TransformUnusedIndirection: change the opcode and the type of the unused indirection.
//
// Arguments:
// ind - Indirection to transform.
// comp - Compiler instance.
// block - Basic block of the indirection.
//
void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block)
{
// A nullcheck is essentially the same as an indirection with no use.
// The difference lies in whether a target register must be allocated.
// On XARCH we can generate a compare with no target register as long as the address
// is not contained.
// On ARM64 we can generate a load to REG_ZR in all cases.
// However, on ARM we must always generate a load to a register.
// In the case where we require a target register, it is better to use GT_IND, since
// GT_NULLCHECK is a non-value node and would therefore require an internal register
// to use as the target. That is non-optimal because it will be modeled as conflicting
// with the source register(s).
// So, to summarize:
// - On ARM64, always use GT_NULLCHECK for a dead indirection.
// - On ARM, always use GT_IND.
// - On XARCH, use GT_IND if we have a contained address, and GT_NULLCHECK otherwise.
// In all cases we try to preserve the original type and never make it wider to avoid AVEs.
// For structs we conservatively lower it to BYTE. For 8-byte primitives we lower it to TYP_INT
// on XARCH as an optimization.
//
assert(ind->OperIs(GT_NULLCHECK, GT_IND, GT_BLK, GT_OBJ));
ind->ChangeType(comp->gtTypeForNullCheck(ind));
#ifdef TARGET_ARM64
bool useNullCheck = true;
#elif TARGET_ARM
bool useNullCheck = false;
#else // TARGET_XARCH
bool useNullCheck = !ind->Addr()->isContained();
#endif // !TARGET_XARCH
if (useNullCheck && !ind->OperIs(GT_NULLCHECK))
{
comp->gtChangeOperToNullCheck(ind, block);
ind->ClearUnusedValue();
}
else if (!useNullCheck && !ind->OperIs(GT_IND))
{
ind->ChangeOper(GT_IND);
ind->SetUnusedValue();
}
}
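// Illustrative sketch (not part of the JIT): a null check is just an indirection whose value is
// discarded - loading a single byte is enough to fault on a null address without widening the
// access, which is why the transformation above narrows the type via gtTypeForNullCheck.
static void NullCheckProbeSketch(const void* addr)
{
    volatile unsigned char probe = *static_cast<const volatile unsigned char*>(addr); // faults if addr is null
    (void)probe;                                                                      // the loaded value is unused
}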
//------------------------------------------------------------------------
// LowerBlockStoreCommon: a common logic to lower STORE_OBJ/BLK/DYN_BLK.
//
// Arguments:
// blkNode - the store blk/obj node we are lowering.
//
void Lowering::LowerBlockStoreCommon(GenTreeBlk* blkNode)
{
assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ));
// Lose the type information stored in the source - we no longer need it.
if (blkNode->Data()->OperIs(GT_OBJ, GT_BLK))
{
blkNode->Data()->SetOper(GT_IND);
LowerIndir(blkNode->Data()->AsIndir());
}
if (TryTransformStoreObjAsStoreInd(blkNode))
{
return;
}
LowerBlockStore(blkNode);
}
//------------------------------------------------------------------------
// TryTransformStoreObjAsStoreInd: try to replace STORE_OBJ/BLK as STOREIND.
//
// Arguments:
// blkNode - the store node.
//
// Return value:
// true if the replacement was made, false otherwise.
//
// Notes:
// TODO-CQ: this method should do the transformation when possible
// and STOREIND should always generate better or the same code as
// STORE_OBJ/BLK for the same copy.
//
bool Lowering::TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode)
{
assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK, GT_STORE_OBJ));
if (!comp->opts.OptimizationEnabled())
{
return false;
}
if (blkNode->OperIs(GT_STORE_DYN_BLK))
{
return false;
}
ClassLayout* layout = blkNode->GetLayout();
if (layout == nullptr)
{
return false;
}
var_types regType = layout->GetRegisterType();
if (regType == TYP_UNDEF)
{
return false;
}
GenTree* src = blkNode->Data();
if (varTypeIsSIMD(regType) && src->IsConstInitVal())
{
// TODO-CQ: support STORE_IND SIMD16(SIMD16, CNS_INT 0).
return false;
}
if (varTypeIsGC(regType))
{
// TODO-CQ: STOREIND does not try to contain src if we need a barrier,
// STORE_OBJ generates better code currently.
return false;
}
if (src->OperIsInitVal() && !src->IsConstInitVal())
{
return false;
}
if (varTypeIsSmall(regType) && !src->IsConstInitVal() && !src->IsLocal())
{
// source operand INDIR will use a widening instruction
// and generate worse code, like `movzx` instead of `mov`
// on x64.
return false;
}
JITDUMP("Replacing STORE_OBJ with STOREIND for [%06u]\n", blkNode->gtTreeID);
blkNode->ChangeOper(GT_STOREIND);
blkNode->ChangeType(regType);
if ((blkNode->gtFlags & GTF_IND_TGT_NOT_HEAP) == 0)
{
blkNode->gtFlags |= GTF_IND_TGTANYWHERE;
}
if (varTypeIsStruct(src))
{
src->ChangeType(regType);
LowerNode(blkNode->Data());
}
else if (src->OperIsInitVal())
{
GenTreeUnOp* initVal = src->AsUnOp();
src = src->gtGetOp1();
assert(src->IsCnsIntOrI());
src->AsIntCon()->FixupInitBlkValue(regType);
blkNode->SetData(src);
BlockRange().Remove(initVal);
}
else
{
assert(src->TypeIs(regType) || src->IsCnsIntOrI() || src->IsCall());
}
LowerStoreIndirCommon(blkNode->AsStoreInd());
return true;
}
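// Illustrative sketch (not part of the JIT): for the init-value path above, FixupInitBlkValue
// conceptually replicates the low byte of the init constant across every byte of the
// register-sized store that replaces the block operation.
static unsigned long long ReplicateInitByteSketch(unsigned char initByte, unsigned storeSizeInBytes)
{
    unsigned long long pattern = 0;
    for (unsigned i = 0; i < storeSizeInBytes; i++)
    {
        pattern = (pattern << 8) | initByte; // e.g. 0x2A over a 4-byte store yields 0x2A2A2A2A
    }
    return pattern;
}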
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------------------
// Lowering::LowerSIMD: Perform containment analysis for a SIMD intrinsic node.
//
// Arguments:
// simdNode - The SIMD intrinsic node.
//
void Lowering::LowerSIMD(GenTreeSIMD* simdNode)
{
if (simdNode->TypeGet() == TYP_SIMD12)
{
// A GT_SIMD node that is required to produce TYP_SIMD12 in fact
// produces a TYP_SIMD16 result
simdNode->gtType = TYP_SIMD16;
}
if (simdNode->GetSIMDIntrinsicId() == SIMDIntrinsicInitN)
{
assert(simdNode->GetSimdBaseType() == TYP_FLOAT);
size_t argCount = simdNode->GetOperandCount();
size_t constArgCount = 0;
float constArgValues[4]{0, 0, 0, 0};
for (GenTree* arg : simdNode->Operands())
{
assert(arg->TypeIs(simdNode->GetSimdBaseType()));
if (arg->IsCnsFltOrDbl())
{
constArgValues[constArgCount] = static_cast<float>(arg->AsDblCon()->gtDconVal);
constArgCount++;
}
}
if (constArgCount == argCount)
{
for (GenTree* arg : simdNode->Operands())
{
BlockRange().Remove(arg);
}
assert(sizeof(constArgValues) == 16);
unsigned cnsSize = sizeof(constArgValues);
unsigned cnsAlign = (comp->compCodeOpt() != Compiler::SMALL_CODE) ? cnsSize : 1;
CORINFO_FIELD_HANDLE hnd =
comp->GetEmitter()->emitBlkConst(constArgValues, cnsSize, cnsAlign, simdNode->GetSimdBaseType());
GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr);
BlockRange().InsertBefore(simdNode, clsVarAddr);
simdNode->ChangeOper(GT_IND);
simdNode->AsOp()->gtOp1 = clsVarAddr;
ContainCheckIndir(simdNode->AsIndir());
return;
}
}
ContainCheckSIMD(simdNode);
}
#endif // FEATURE_SIMD
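// Illustrative sketch (not part of the JIT): when every SIMDIntrinsicInitN argument above is a
// constant, the four float lanes are laid out back to back as one 16-byte blob - the shape that
// emitBlkConst places in the data section and that the new GT_IND then loads in one go.
static void PackVector4ConstantSketch(const float laneValues[4], unsigned char blob[16])
{
    for (int lane = 0; lane < 4; lane++)
    {
        const unsigned char* src = reinterpret_cast<const unsigned char*>(&laneValues[lane]);
        for (int b = 0; b < 4; b++)
        {
            blob[(lane * 4) + b] = src[b]; // 4 bytes per lane, lanes in argument order
        }
    }
}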
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
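For reference, a scalar C++ sketch of the identity the new ARM64 sequence relies on (an illustration only, not the JIT transform itself; assumes `b` is a positive power of two):
```cpp
int RemPow2(int a, int b)
{
    unsigned mask = unsigned(b) - 1;
    unsigned mag  = (a < 0 ? 0u - unsigned(a) : unsigned(a)) & mask; // |a| & (b - 1), wraps safely for INT_MIN
    return (a < 0) ? -int(mag) : int(mag);                           // restore the sign, as csneg does
}
```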
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/lower.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Lower XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#ifndef _LOWER_H_
#define _LOWER_H_
#include "compiler.h"
#include "phase.h"
#include "lsra.h"
#include "sideeffects.h"
class Lowering final : public Phase
{
public:
inline Lowering(Compiler* compiler, LinearScanInterface* lsra)
: Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM)
{
m_lsra = (LinearScan*)lsra;
assert(m_lsra);
}
virtual PhaseStatus DoPhase() override;
// This variant of LowerRange is called from outside of the main Lowering pass,
// so it creates its own instance of Lowering to do so.
void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range)
{
Lowering lowerer(comp, m_lsra);
lowerer.m_block = block;
lowerer.LowerRange(range);
}
private:
// LowerRange handles new code that is introduced by or after Lowering.
void LowerRange(LIR::ReadOnlyRange& range)
{
for (GenTree* newNode : range)
{
LowerNode(newNode);
}
}
void LowerRange(GenTree* firstNode, GenTree* lastNode)
{
LIR::ReadOnlyRange range(firstNode, lastNode);
LowerRange(range);
}
// ContainCheckRange handles new code that is introduced by or after Lowering,
// and that is known to be already in Lowered form.
void ContainCheckRange(LIR::ReadOnlyRange& range)
{
for (GenTree* newNode : range)
{
ContainCheckNode(newNode);
}
}
void ContainCheckRange(GenTree* firstNode, GenTree* lastNode)
{
LIR::ReadOnlyRange range(firstNode, lastNode);
ContainCheckRange(range);
}
void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree)
{
LIR::Range range = LIR::SeqTree(comp, tree);
ContainCheckRange(range);
BlockRange().InsertBefore(insertionPoint, std::move(range));
}
void ContainCheckNode(GenTree* node);
void ContainCheckDivOrMod(GenTreeOp* node);
void ContainCheckReturnTrap(GenTreeOp* node);
void ContainCheckArrOffset(GenTreeArrOffs* node);
void ContainCheckLclHeap(GenTreeOp* node);
void ContainCheckRet(GenTreeUnOp* ret);
void ContainCheckJTrue(GenTreeOp* node);
void ContainCheckBitCast(GenTree* node);
void ContainCheckCallOperands(GenTreeCall* call);
void ContainCheckIndir(GenTreeIndir* indirNode);
void ContainCheckStoreIndir(GenTreeStoreInd* indirNode);
void ContainCheckMul(GenTreeOp* node);
void ContainCheckShiftRotate(GenTreeOp* node);
void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const;
void ContainCheckCast(GenTreeCast* node);
void ContainCheckCompare(GenTreeOp* node);
void ContainCheckBinary(GenTreeOp* node);
void ContainCheckBoundsChk(GenTreeBoundsChk* node);
#ifdef TARGET_XARCH
void ContainCheckFloatBinary(GenTreeOp* node);
void ContainCheckIntrinsic(GenTreeOp* node);
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
void ContainCheckSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr);
void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node);
#endif // FEATURE_HW_INTRINSICS
#ifdef DEBUG
static void CheckCallArg(GenTree* arg);
static void CheckCall(GenTreeCall* call);
static void CheckNode(Compiler* compiler, GenTree* node);
static bool CheckBlock(Compiler* compiler, BasicBlock* block);
#endif // DEBUG
void LowerBlock(BasicBlock* block);
GenTree* LowerNode(GenTree* node);
bool IsInvariantInRange(GenTree* node, GenTree* endExclusive);
// ------------------------------
// Call Lowering
// ------------------------------
void LowerCall(GenTree* call);
void LowerCFGCall(GenTreeCall* call);
void MoveCFGCallArg(GenTreeCall* call, GenTree* node);
#ifndef TARGET_64BIT
GenTree* DecomposeLongCompare(GenTree* cmp);
#endif
GenTree* OptimizeConstCompare(GenTree* cmp);
GenTree* LowerCompare(GenTree* cmp);
GenTree* LowerJTrue(GenTreeOp* jtrue);
GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
void LowerJmpMethod(GenTree* jmp);
void LowerRet(GenTreeUnOp* ret);
void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar);
void LowerRetStruct(GenTreeUnOp* ret);
void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret);
void LowerCallStruct(GenTreeCall* call);
void LowerStoreSingleRegCallStruct(GenTreeBlk* store);
#if !defined(WINDOWS_AMD64_ABI)
GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const;
#endif // WINDOWS_AMD64_ABI
GenTree* LowerDelegateInvoke(GenTreeCall* call);
GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
GenTree* LowerDirectCall(GenTreeCall* call);
GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call);
GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget);
void LowerFastTailCall(GenTreeCall* callNode);
void RehomeArgForFastTailCall(unsigned int lclNum,
GenTree* insertTempBefore,
GenTree* lookForUsesStart,
GenTreeCall* callNode);
void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint);
GenTree* LowerVirtualVtableCall(GenTreeCall* call);
GenTree* LowerVirtualStubCall(GenTreeCall* call);
void LowerArgsForCall(GenTreeCall* call);
void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode);
GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type);
void LowerArg(GenTreeCall* call, GenTree** ppTree);
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info);
GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum);
#endif
void InsertPInvokeCallProlog(GenTreeCall* call);
void InsertPInvokeCallEpilog(GenTreeCall* call);
void InsertPInvokeMethodProlog();
void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr));
GenTree* SetGCState(int cns);
GenTree* CreateReturnTrapSeq();
enum FrameLinkAction
{
PushFrame,
PopFrame
};
GenTree* CreateFrameLinkUpdate(FrameLinkAction);
GenTree* AddrGen(ssize_t addr);
GenTree* AddrGen(void* addr);
GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL)
{
return comp->gtNewOperNode(GT_IND, type, tree);
}
GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL)
{
return comp->gtNewPhysRegNode(reg, type);
}
GenTree* ThisReg(GenTreeCall* call)
{
return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF);
}
GenTree* Offset(GenTree* base, unsigned offset)
{
var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset);
}
GenTree* OffsetByIndex(GenTree* base, GenTree* index)
{
var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0);
}
GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale)
{
var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0);
}
// Replace the definition of the given use with a lclVar, allocating a new temp
// if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node.
GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM)
{
GenTree* oldUseNode = use.Def();
if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM))
{
GenTree* assign;
use.ReplaceWithLclVar(comp, tempNum, &assign);
GenTree* newUseNode = use.Def();
ContainCheckRange(oldUseNode->gtNext, newUseNode);
// We need to lower the LclVar and assignment since there may be certain
// types or scenarios, such as TYP_SIMD12, that need special handling
LowerNode(assign);
LowerNode(newUseNode);
return newUseNode->AsLclVar();
}
return oldUseNode->AsLclVar();
}
// return true if this call target is within range of a pc-rel call on the machine
bool IsCallTargetInRange(void* addr);
#if defined(TARGET_XARCH)
GenTree* PreferredRegOptionalOperand(GenTree* tree);
// ------------------------------------------------------------------
// SetRegOptionalBinOp - Indicates which of the operands of a bin-op
// register requirement is optional. Xarch instruction set allows
// either of op1 or op2 of binary operation (e.g. add, mul etc) to be
// a memory operand. This routine tells the register allocator
// which of its operands optionally require a register. LSRA might not
// allocate a register to RefTypeUse positions of such operands if it
// is beneficial. In such a case codegen will treat them as memory
// operands.
//
// Arguments:
// tree - Gentree of a binary operation.
// isSafeToMarkOp1 True if it's safe to mark op1 as register optional
// isSafeToMarkOp2 True if it's safe to mark op2 as register optional
//
// Returns
// The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2
// by calling IsSafeToContainMem.
//
// Note: On xarch at most only one of the operands will be marked as
// reg optional, even when both operands could be considered register
// optional.
void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2)
{
assert(GenTree::OperIsBinary(tree->OperGet()));
GenTree* const op1 = tree->gtGetOp1();
GenTree* const op2 = tree->gtGetOp2();
const unsigned operatorSize = genTypeSize(tree->TypeGet());
const bool op1Legal =
isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet()));
const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet()));
GenTree* regOptionalOperand = nullptr;
if (op1Legal)
{
regOptionalOperand = op2Legal ? PreferredRegOptionalOperand(tree) : op1;
}
else if (op2Legal)
{
regOptionalOperand = op2;
}
if (regOptionalOperand != nullptr)
{
regOptionalOperand->SetRegOptional();
}
}
#endif // defined(TARGET_XARCH)
// Per tree node member functions
void LowerStoreIndirCommon(GenTreeStoreInd* ind);
void LowerIndir(GenTreeIndir* ind);
void LowerStoreIndir(GenTreeStoreInd* node);
GenTree* LowerAdd(GenTreeOp* node);
GenTree* LowerMul(GenTreeOp* mul);
GenTree* LowerBinaryArithmetic(GenTreeOp* binOp);
bool LowerUnsignedDivOrMod(GenTreeOp* divMod);
GenTree* LowerConstIntDivOrMod(GenTree* node);
GenTree* LowerSignedDivOrMod(GenTree* node);
void LowerBlockStore(GenTreeBlk* blkNode);
void LowerBlockStoreCommon(GenTreeBlk* blkNode);
void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr);
void LowerPutArgStk(GenTreePutArgStk* tree);
bool TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent);
bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode);
GenTree* LowerSwitch(GenTree* node);
bool TryLowerSwitchToBitTest(
BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue);
void LowerCast(GenTree* node);
#if !CPU_LOAD_STORE_ARCH
bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd);
bool IsBinOpInRMWStoreInd(GenTree* tree);
bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource);
bool LowerRMWMemOp(GenTreeIndir* storeInd);
#endif
void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node);
bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc);
void LowerStoreLoc(GenTreeLclVarCommon* tree);
GenTree* LowerArrElem(GenTree* node);
void LowerRotate(GenTree* tree);
void LowerShift(GenTreeOp* shift);
#ifdef FEATURE_SIMD
void LowerSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void LowerHWIntrinsic(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition);
void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp);
void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node);
GenTree* TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode);
GenTree* TryLowerAndOpToExtractLowestSetBit(GenTreeOp* andNode);
GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode);
#elif defined(TARGET_ARM64)
bool IsValidConstForMovImm(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node);
#endif // !TARGET_XARCH && !TARGET_ARM64
union VectorConstant {
int8_t i8[32];
uint8_t u8[32];
int16_t i16[16];
uint16_t u16[16];
int32_t i32[8];
uint32_t u32[8];
int64_t i64[4];
uint64_t u64[4];
float f32[8];
double f64[4];
};
//----------------------------------------------------------------------------------------------
// VectorConstantIsBroadcastedI64: Check N i64 elements in a constant vector for equality
//
// Arguments:
// vecCns - Constant vector
// count - Amount of i64 components to compare
//
// Returns:
// true if N i64 elements of the given vector are equal
static bool VectorConstantIsBroadcastedI64(VectorConstant& vecCns, int count)
{
assert(count >= 1 && count <= 4);
for (int i = 1; i < count; i++)
{
if (vecCns.i64[i] != vecCns.i64[0])
{
return false;
}
}
return true;
}
//----------------------------------------------------------------------------------------------
// ProcessArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method
//
// Arguments:
// arg - The argument to process
// argIdx - The index of the argument being processed
// vecCns - The vector constant being constructed
// baseType - The base type of the vector constant
//
// Returns:
// true if arg was a constant; otherwise, false
static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType)
{
switch (baseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (arg->IsCnsIntOrI())
{
vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i8[argIdx] == 0);
}
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
if (arg->IsCnsIntOrI())
{
vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i16[argIdx] == 0);
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (arg->IsCnsIntOrI())
{
vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i32[argIdx] == 0);
}
break;
}
case TYP_LONG:
case TYP_ULONG:
{
#if defined(TARGET_64BIT)
if (arg->IsCnsIntOrI())
{
vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal);
return true;
}
#else
if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI())
{
// 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT
// We need to reconstruct the 64-bit value in order to handle this
INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal;
gtLconVal <<= 32;
gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal;
vecCns.i64[argIdx] = gtLconVal;
return true;
}
#endif // TARGET_64BIT
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i64[argIdx] == 0);
}
break;
}
case TYP_FLOAT:
{
if (arg->IsCnsFltOrDbl())
{
vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
// We check against the i32, rather than f32, to account for -0.0
assert(vecCns.i32[argIdx] == 0);
}
break;
}
case TYP_DOUBLE:
{
if (arg->IsCnsFltOrDbl())
{
vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
// We check against the i64, rather than f64, to account for -0.0
assert(vecCns.i64[argIdx] == 0);
}
break;
}
default:
{
unreached();
}
}
return false;
}
#endif // FEATURE_HW_INTRINSICS
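// Illustrative sketch (not part of the JIT): on 32-bit targets a 64-bit constant arrives as
// two 32-bit GT_CNS_INT halves (see HandleArgForHWIntrinsicCreate above). Rebuilding the
// value requires treating the low half as unsigned so it does not sign-extend into the
// high word. The helper name is hypothetical.
static int64_t CombineDecomposedHalvesSketch(int32_t loHalf, int32_t hiHalf)
{
    return (static_cast<int64_t>(hiHalf) << 32) | static_cast<uint32_t>(loHalf);
}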
//----------------------------------------------------------------------------------------------
// TryRemoveCastIfPresent: Removes op if it is a cast operation and the size of its input is at
// least the size of expectedType
//
// Arguments:
// expectedType - The expected type of the cast operation input if it is to be removed
// op - The tree to remove if it is a cast op whose input is at least the size of expectedType
//
// Returns:
// op if it was not a cast node or if its input is not at least the size of expected type;
// Otherwise, it returns the underlying operation that was being casted
GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op)
{
if (!op->OperIs(GT_CAST))
{
return op;
}
GenTree* castOp = op->AsCast()->CastOp();
if (genTypeSize(castOp->gtType) >= genTypeSize(expectedType))
{
BlockRange().Remove(op);
return castOp;
}
return op;
}
// Utility functions
public:
static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB);
// return true if 'childNode' is an immediate that can be contained
// by the 'parentNode' (i.e. folded into an instruction)
// for example small enough and non-relocatable
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const;
// Return true if 'node' is a containable memory op.
bool IsContainableMemoryOp(GenTree* node) const
{
return m_lsra->isContainableMemoryOp(node);
}
#ifdef FEATURE_HW_INTRINSICS
// Tries to get a containable node for a given HWIntrinsic
bool TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode,
GenTree** pNode,
bool* supportsRegOptional,
GenTreeHWIntrinsic* transparentParentNode = nullptr);
#endif // FEATURE_HW_INTRINSICS
static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block);
private:
static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd);
bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index);
// Makes 'childNode' contained in the 'parentNode'
void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const;
// Checks and makes 'childNode' contained in the 'parentNode'
bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode);
// Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode
// can be contained.
bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const;
// Similar to above, but allows bypassing a "transparent" parent.
bool IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const;
inline LIR::Range& BlockRange() const
{
return LIR::AsRange(m_block);
}
// Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister.
// This method checks, and asserts in the DEBUG case if it is not so marked,
// but in the non-DEBUG case (asserts disabled) set the flag so that we don't generate bad code.
// This ensures that the local's value is valid on-stack as expected for a *LCL_FLD.
void verifyLclFldDoNotEnregister(unsigned lclNum)
{
LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
// Do a couple of simple checks before setting lvDoNotEnregister.
// This may not cover all cases in 'isRegCandidate()' but we don't want to
// do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister.
if (varDsc->lvTracked && !varDsc->lvDoNotEnregister)
{
assert(!m_lsra->isRegCandidate(varDsc));
comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
}
LinearScan* m_lsra;
unsigned vtableCallTemp; // local variable we use as a temp for vtable calls
mutable SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate
BasicBlock* m_block;
};
#endif // _LOWER_H_
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Lower XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#ifndef _LOWER_H_
#define _LOWER_H_
#include "compiler.h"
#include "phase.h"
#include "lsra.h"
#include "sideeffects.h"
class Lowering final : public Phase
{
public:
inline Lowering(Compiler* compiler, LinearScanInterface* lsra)
: Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM)
{
m_lsra = (LinearScan*)lsra;
assert(m_lsra);
}
virtual PhaseStatus DoPhase() override;
// This variant of LowerRange is called from outside of the main Lowering pass,
// so it creates its own instance of Lowering to do so.
void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range)
{
Lowering lowerer(comp, m_lsra);
lowerer.m_block = block;
lowerer.LowerRange(range);
}
private:
// LowerRange handles new code that is introduced by or after Lowering.
void LowerRange(LIR::ReadOnlyRange& range)
{
for (GenTree* newNode : range)
{
LowerNode(newNode);
}
}
void LowerRange(GenTree* firstNode, GenTree* lastNode)
{
LIR::ReadOnlyRange range(firstNode, lastNode);
LowerRange(range);
}
// ContainCheckRange handles new code that is introduced by or after Lowering,
// and that is known to be already in Lowered form.
void ContainCheckRange(LIR::ReadOnlyRange& range)
{
for (GenTree* newNode : range)
{
ContainCheckNode(newNode);
}
}
void ContainCheckRange(GenTree* firstNode, GenTree* lastNode)
{
LIR::ReadOnlyRange range(firstNode, lastNode);
ContainCheckRange(range);
}
void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree)
{
LIR::Range range = LIR::SeqTree(comp, tree);
ContainCheckRange(range);
BlockRange().InsertBefore(insertionPoint, std::move(range));
}
void ContainCheckNode(GenTree* node);
void ContainCheckDivOrMod(GenTreeOp* node);
void ContainCheckReturnTrap(GenTreeOp* node);
void ContainCheckArrOffset(GenTreeArrOffs* node);
void ContainCheckLclHeap(GenTreeOp* node);
void ContainCheckRet(GenTreeUnOp* ret);
void ContainCheckJTrue(GenTreeOp* node);
void ContainCheckBitCast(GenTree* node);
void ContainCheckCallOperands(GenTreeCall* call);
void ContainCheckIndir(GenTreeIndir* indirNode);
void ContainCheckStoreIndir(GenTreeStoreInd* indirNode);
void ContainCheckMul(GenTreeOp* node);
void ContainCheckShiftRotate(GenTreeOp* node);
void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const;
void ContainCheckCast(GenTreeCast* node);
void ContainCheckCompare(GenTreeOp* node);
void ContainCheckBinary(GenTreeOp* node);
void ContainCheckBoundsChk(GenTreeBoundsChk* node);
#ifdef TARGET_XARCH
void ContainCheckFloatBinary(GenTreeOp* node);
void ContainCheckIntrinsic(GenTreeOp* node);
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
void ContainCheckSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr);
void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node);
#endif // FEATURE_HW_INTRINSICS
#ifdef DEBUG
static void CheckCallArg(GenTree* arg);
static void CheckCall(GenTreeCall* call);
static void CheckNode(Compiler* compiler, GenTree* node);
static bool CheckBlock(Compiler* compiler, BasicBlock* block);
#endif // DEBUG
void LowerBlock(BasicBlock* block);
GenTree* LowerNode(GenTree* node);
bool IsInvariantInRange(GenTree* node, GenTree* endExclusive);
// ------------------------------
// Call Lowering
// ------------------------------
void LowerCall(GenTree* call);
void LowerCFGCall(GenTreeCall* call);
void MoveCFGCallArg(GenTreeCall* call, GenTree* node);
#ifndef TARGET_64BIT
GenTree* DecomposeLongCompare(GenTree* cmp);
#endif
GenTree* OptimizeConstCompare(GenTree* cmp);
GenTree* LowerCompare(GenTree* cmp);
GenTree* LowerJTrue(GenTreeOp* jtrue);
GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
void LowerJmpMethod(GenTree* jmp);
void LowerRet(GenTreeUnOp* ret);
void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar);
void LowerRetStruct(GenTreeUnOp* ret);
void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret);
void LowerCallStruct(GenTreeCall* call);
void LowerStoreSingleRegCallStruct(GenTreeBlk* store);
#if !defined(WINDOWS_AMD64_ABI)
GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const;
#endif // !WINDOWS_AMD64_ABI
GenTree* LowerDelegateInvoke(GenTreeCall* call);
GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
GenTree* LowerDirectCall(GenTreeCall* call);
GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call);
GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget);
void LowerFastTailCall(GenTreeCall* callNode);
void RehomeArgForFastTailCall(unsigned int lclNum,
GenTree* insertTempBefore,
GenTree* lookForUsesStart,
GenTreeCall* callNode);
void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint);
GenTree* LowerVirtualVtableCall(GenTreeCall* call);
GenTree* LowerVirtualStubCall(GenTreeCall* call);
void LowerArgsForCall(GenTreeCall* call);
void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode);
GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type);
void LowerArg(GenTreeCall* call, GenTree** ppTree);
#if defined(TARGET_ARMARCH) || defined(TARGET_LOONGARCH64)
GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info);
GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum);
#endif
void InsertPInvokeCallProlog(GenTreeCall* call);
void InsertPInvokeCallEpilog(GenTreeCall* call);
void InsertPInvokeMethodProlog();
void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr));
GenTree* SetGCState(int cns);
GenTree* CreateReturnTrapSeq();
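// FrameLinkAction: selects whether CreateFrameLinkUpdate emits code to push the PInvoke
// frame onto the thread's Frame chain or to pop it off.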
enum FrameLinkAction
{
PushFrame,
PopFrame
};
GenTree* CreateFrameLinkUpdate(FrameLinkAction);
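// Small helpers for constructing new nodes during lowering.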
GenTree* AddrGen(ssize_t addr);
GenTree* AddrGen(void* addr);
GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL)
{
return comp->gtNewOperNode(GT_IND, type, tree);
}
GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL)
{
return comp->gtNewPhysRegNode(reg, type);
}
GenTree* ThisReg(GenTreeCall* call)
{
return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF);
}
GenTree* Offset(GenTree* base, unsigned offset)
{
var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset);
}
GenTree* OffsetByIndex(GenTree* base, GenTree* index)
{
var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0);
}
GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale)
{
var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0);
}
// Replace the definition of the given use with a lclVar, allocating a new temp
// if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node.
GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM)
{
GenTree* oldUseNode = use.Def();
if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM))
{
GenTree* assign;
use.ReplaceWithLclVar(comp, tempNum, &assign);
GenTree* newUseNode = use.Def();
ContainCheckRange(oldUseNode->gtNext, newUseNode);
// We need to lower the LclVar and assignment since there may be certain
// types or scenarios, such as TYP_SIMD12, that need special handling
LowerNode(assign);
LowerNode(newUseNode);
return newUseNode->AsLclVar();
}
return oldUseNode->AsLclVar();
}
// return true if this call target is within range of a pc-rel call on the machine
bool IsCallTargetInRange(void* addr);
#if defined(TARGET_XARCH)
GenTree* PreferredRegOptionalOperand(GenTree* tree);
// ------------------------------------------------------------------
// SetRegOptionalForBinOp - Indicates which operands of a bin-op have an
// optional register requirement. The xarch instruction set allows
// either op1 or op2 of a binary operation (e.g. add, mul etc.) to be
// a memory operand. This routine provides info to register allocator
// which of its operands optionally require a register. Lsra might not
// allocate a register to RefTypeUse positions of such operands if it
// is beneficial. In such a case codegen will treat them as memory
// operands.
//
// Arguments:
// tree - Gentree of a binary operation.
// isSafeToMarkOp1 True if it's safe to mark op1 as register optional
// isSafeToMarkOp2 True if it's safe to mark op2 as register optional
//
// Note:
// The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2
// by calling IsSafeToContainMem.
//
// Note: On xarch at most only one of the operands will be marked as
// reg optional, even when both operands could be considered register
// optional.
void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2)
{
assert(GenTree::OperIsBinary(tree->OperGet()));
GenTree* const op1 = tree->gtGetOp1();
GenTree* const op2 = tree->gtGetOp2();
const unsigned operatorSize = genTypeSize(tree->TypeGet());
const bool op1Legal =
isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet()));
const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet()));
GenTree* regOptionalOperand = nullptr;
if (op1Legal)
{
regOptionalOperand = op2Legal ? PreferredRegOptionalOperand(tree) : op1;
}
else if (op2Legal)
{
regOptionalOperand = op2;
}
if (regOptionalOperand != nullptr)
{
regOptionalOperand->SetRegOptional();
}
}
#endif // defined(TARGET_XARCH)
// Per tree node member functions
void LowerStoreIndirCommon(GenTreeStoreInd* ind);
void LowerIndir(GenTreeIndir* ind);
void LowerStoreIndir(GenTreeStoreInd* node);
GenTree* LowerAdd(GenTreeOp* node);
GenTree* LowerMul(GenTreeOp* mul);
GenTree* LowerBinaryArithmetic(GenTreeOp* binOp);
bool LowerUnsignedDivOrMod(GenTreeOp* divMod);
GenTree* LowerConstIntDivOrMod(GenTree* node);
GenTree* LowerSignedDivOrMod(GenTree* node);
void LowerBlockStore(GenTreeBlk* blkNode);
void LowerBlockStoreCommon(GenTreeBlk* blkNode);
void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr);
void LowerPutArgStk(GenTreePutArgStk* tree);
bool TryCreateAddrMode(GenTree* addr, bool isContainable, GenTree* parent);
bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode);
GenTree* LowerSwitch(GenTree* node);
bool TryLowerSwitchToBitTest(
BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue);
void LowerCast(GenTree* node);
#if !CPU_LOAD_STORE_ARCH
bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd);
bool IsBinOpInRMWStoreInd(GenTree* tree);
bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource);
bool LowerRMWMemOp(GenTreeIndir* storeInd);
#endif
void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node);
bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc);
void LowerStoreLoc(GenTreeLclVarCommon* tree);
GenTree* LowerArrElem(GenTree* node);
void LowerRotate(GenTree* tree);
void LowerShift(GenTreeOp* shift);
#ifdef FEATURE_SIMD
void LowerSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
void LowerHWIntrinsic(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition);
void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp);
void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node);
GenTree* TryLowerAndOpToResetLowestSetBit(GenTreeOp* andNode);
GenTree* TryLowerAndOpToExtractLowestSetBit(GenTreeOp* andNode);
GenTree* TryLowerAndOpToAndNot(GenTreeOp* andNode);
#elif defined(TARGET_ARM64)
bool IsValidConstForMovImm(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node);
GenTree* LowerModPow2(GenTree* node);
#endif // TARGET_XARCH, TARGET_ARM64
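// VectorConstant: a 32-byte buffer that accumulates the element values of a vector
// constant being built, viewable as any of the supported element types.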
union VectorConstant {
int8_t i8[32];
uint8_t u8[32];
int16_t i16[16];
uint16_t u16[16];
int32_t i32[8];
uint32_t u32[8];
int64_t i64[4];
uint64_t u64[4];
float f32[8];
double f64[4];
};
//----------------------------------------------------------------------------------------------
// VectorConstantIsBroadcastedI64: Check N i64 elements in a constant vector for equality
//
// Arguments:
// vecCns - Constant vector
// count - Amount of i64 components to compare
//
// Returns:
// true if N i64 elements of the given vector are equal
static bool VectorConstantIsBroadcastedI64(VectorConstant& vecCns, int count)
{
assert(count >= 1 && count <= 4);
for (int i = 1; i < count; i++)
{
if (vecCns.i64[i] != vecCns.i64[0])
{
return false;
}
}
return true;
}
//----------------------------------------------------------------------------------------------
// HandleArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method
//
// Arguments:
// arg - The argument to process
// argIdx - The index of the argument being processed
// vecCns - The vector constant being constructed
// baseType - The base type of the vector constant
//
// Returns:
// true if arg was a constant; otherwise, false
static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType)
{
switch (baseType)
{
case TYP_BYTE:
case TYP_UBYTE:
{
if (arg->IsCnsIntOrI())
{
vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i8[argIdx] == 0);
}
break;
}
case TYP_SHORT:
case TYP_USHORT:
{
if (arg->IsCnsIntOrI())
{
vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i16[argIdx] == 0);
}
break;
}
case TYP_INT:
case TYP_UINT:
{
if (arg->IsCnsIntOrI())
{
vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i32[argIdx] == 0);
}
break;
}
case TYP_LONG:
case TYP_ULONG:
{
#if defined(TARGET_64BIT)
if (arg->IsCnsIntOrI())
{
vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal);
return true;
}
#else
if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI())
{
// 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT
// We need to reconstruct the 64-bit value in order to handle this
INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal;
gtLconVal <<= 32;
gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal;
vecCns.i64[argIdx] = gtLconVal;
return true;
}
#endif // TARGET_64BIT
else
{
// We expect the VectorConstant to have been already zeroed
assert(vecCns.i64[argIdx] == 0);
}
break;
}
case TYP_FLOAT:
{
if (arg->IsCnsFltOrDbl())
{
vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
// We check against the i32, rather than f32, to account for -0.0
assert(vecCns.i32[argIdx] == 0);
}
break;
}
case TYP_DOUBLE:
{
if (arg->IsCnsFltOrDbl())
{
vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal);
return true;
}
else
{
// We expect the VectorConstant to have been already zeroed
// We check against the i64, rather than f64, to account for -0.0
assert(vecCns.i64[argIdx] == 0);
}
break;
}
default:
{
unreached();
}
}
return false;
}
#endif // FEATURE_HW_INTRINSICS
//----------------------------------------------------------------------------------------------
// TryRemoveCastIfPresent: Removes op if it is a cast operation and the size of its input is at
// least the size of expectedType
//
// Arguments:
// expectedType - The expected type of the cast operation input if it is to be removed
// op - The tree to remove if it is a cast op whose input is at least the size of expectedType
//
// Returns:
// op if it was not a cast node or if its input is not at least the size of expected type;
// Otherwise, it returns the underlying operation that was being casted
GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op)
{
if (!op->OperIs(GT_CAST))
{
return op;
}
GenTree* castOp = op->AsCast()->CastOp();
if (genTypeSize(castOp->gtType) >= genTypeSize(expectedType))
{
BlockRange().Remove(op);
return castOp;
}
return op;
}
// Utility functions
public:
static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB);
// return true if 'childNode' is an immediate that can be contained
// by the 'parentNode' (i.e. folded into an instruction)
// for example small enough and non-relocatable
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const;
// Return true if 'node' is a containable memory op.
bool IsContainableMemoryOp(GenTree* node) const
{
return m_lsra->isContainableMemoryOp(node);
}
#ifdef FEATURE_HW_INTRINSICS
// Tries to get a containable node for a given HWIntrinsic
bool TryGetContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode,
GenTree** pNode,
bool* supportsRegOptional,
GenTreeHWIntrinsic* transparentParentNode = nullptr);
#endif // FEATURE_HW_INTRINSICS
static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block);
private:
static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd);
bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index);
// Makes 'childNode' contained in the 'parentNode'
void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const;
// Checks and makes 'childNode' contained in the 'parentNode'
bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode);
// Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode
// can be contained.
bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode) const;
// Similar to above, but allows bypassing a "transparent" parent.
bool IsSafeToContainMem(GenTree* grandparentNode, GenTree* parentNode, GenTree* childNode) const;
inline LIR::Range& BlockRange() const
{
return LIR::AsRange(m_block);
}
// Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister.
// This method checks, and asserts in the DEBUG case if it is not so marked,
// but in the non-DEBUG case (asserts disabled) set the flag so that we don't generate bad code.
// This ensures that the local's value is valid on-stack as expected for a *LCL_FLD.
void verifyLclFldDoNotEnregister(unsigned lclNum)
{
LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
// Do a couple of simple checks before setting lvDoNotEnregister.
// This may not cover all cases in 'isRegCandidate()' but we don't want to
// do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister.
if (varDsc->lvTracked && !varDsc->lvDoNotEnregister)
{
assert(!m_lsra->isRegCandidate(varDsc));
comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
}
LinearScan* m_lsra; // the LinearScan instance, used to query register-allocation information
unsigned vtableCallTemp; // local variable we use as a temp for vtable calls
mutable SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate
BasicBlock* m_block; // the block currently being lowered
};
#endif // _LOWER_H_
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
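For reference, here is a minimal scalar sketch of what the `and`/`negs`/`and`/`csneg` sequence computes for a signed `a % 16`. It is not code from the PR; the function name `ModBy16` and the constant 16 are purely illustrative.
```cpp
#include <cstdio>
// Scalar equivalent of the ARM64 and/negs/and/csneg sequence for signed "a % 16".
// Note: -a overflows for INT_MIN in C++; the JIT-generated code relies on
// two's-complement wrap, which still yields the correct result of 0.
static int ModBy16(int a)
{
int pos = a & 15; // remainder if a is positive
int neg = (-a) & 15; // remainder magnitude if a is negative
return (a > 0) ? pos : -neg; // csneg: keep pos when positive, otherwise negate neg
}
int main()
{
printf("%d %d %d\n", ModBy16(37), ModBy16(-37), ModBy16(0)); // prints: 5 -5 0
return 0;
}
```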
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/lowerarmarch.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Lowering for ARM and ARM64 common code XX
XX XX
XX This encapsulates common logic for lowering trees for the ARM and ARM64 XX
XX architectures. For a more detailed view of what lowering is, please XX
XX take a look at Lower.cpp XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures
#include "jit.h"
#include "sideeffects.h"
#include "lower.h"
#include "lsra.h"
#ifdef FEATURE_HW_INTRINSICS
#include "hwintrinsic.h"
#endif
//------------------------------------------------------------------------
// IsCallTargetInRange: Can a call target address be encoded in-place?
//
// Return Value:
// True if the addr fits into the range.
//
bool Lowering::IsCallTargetInRange(void* addr)
{
return comp->codeGen->validImmForBL((ssize_t)addr);
}
//------------------------------------------------------------------------
// IsContainableImmed: Is an immediate encodable in-place?
//
// Return Value:
// True if the immediate can be folded into an instruction,
// for example small enough and non-relocatable.
//
bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
{
if (!varTypeIsFloating(parentNode->TypeGet()))
{
#ifdef TARGET_ARM64
if (parentNode->OperIsCompare() && childNode->IsFPZero())
{
// Contain 0.0 constant in fcmp on arm64
// TODO: Enable for arm too (vcmp)
// We currently don't emit these for floating points
assert(!parentNode->OperIs(GT_TEST_EQ, GT_TEST_NE));
return true;
}
#endif
// Make sure we have an actual immediate
if (!childNode->IsCnsIntOrI())
return false;
if (childNode->AsIntCon()->ImmedValNeedsReloc(comp))
return false;
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t type.
target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->gtIconVal;
emitAttr attr = emitActualTypeSize(childNode->TypeGet());
emitAttr size = EA_SIZE(attr);
#ifdef TARGET_ARM
insFlags flags = parentNode->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
#endif
switch (parentNode->OperGet())
{
case GT_ADD:
case GT_SUB:
#ifdef TARGET_ARM64
return emitter::emitIns_valid_imm_for_add(immVal, size);
case GT_CMPXCHG:
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
return comp->compOpportunisticallyDependsOn(InstructionSet_Atomics)
? false
: emitter::emitIns_valid_imm_for_add(immVal, size);
#elif defined(TARGET_ARM)
return emitter::emitIns_valid_imm_for_add(immVal, flags);
#endif
break;
#ifdef TARGET_ARM64
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_BOUNDS_CHECK:
return emitter::emitIns_valid_imm_for_cmp(immVal, size);
case GT_AND:
case GT_OR:
case GT_XOR:
case GT_TEST_EQ:
case GT_TEST_NE:
return emitter::emitIns_valid_imm_for_alu(immVal, size);
case GT_JCMP:
assert(((parentNode->gtFlags & GTF_JCMP_TST) == 0) ? (immVal == 0) : isPow2(immVal));
return true;
#elif defined(TARGET_ARM)
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_CMP:
case GT_AND:
case GT_OR:
case GT_XOR:
return emitter::emitIns_valid_imm_for_alu(immVal);
#endif // TARGET_ARM
#ifdef TARGET_ARM64
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
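// An integral zero can be stored directly from the zero register, so it is always containable.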
if (immVal == 0)
return true;
break;
#endif
default:
break;
}
}
return false;
}
//------------------------------------------------------------------------
// LowerStoreLoc: Lower a store of a lclVar
//
// Arguments:
// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//
// Notes:
// This involves:
// - Widening operations of unsigneds.
//
void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
GenTree* op1 = storeLoc->gtGetOp1();
if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
{
// Try to widen the ops if they are going into a local var.
GenTreeIntCon* con = op1->AsIntCon();
ssize_t ival = con->gtIconVal;
unsigned varNum = storeLoc->GetLclNum();
LclVarDsc* varDsc = comp->lvaGetDesc(varNum);
if (varDsc->lvIsSIMDType())
{
noway_assert(storeLoc->gtType != TYP_STRUCT);
}
unsigned size = genTypeSize(storeLoc);
// If we are storing a constant into a local variable
// we extend the size of the store here
if ((size < 4) && !varTypeIsStruct(varDsc))
{
if (!varTypeIsUnsigned(varDsc))
{
if (genTypeSize(storeLoc) == 1)
{
if ((ival & 0x7f) != ival)
{
ival = ival | 0xffffff00;
}
}
else
{
assert(genTypeSize(storeLoc) == 2);
if ((ival & 0x7fff) != ival)
{
ival = ival | 0xffff0000;
}
}
}
// TODO-CQ: if the field is promoted independently shouldn't we
// also be able to do this?
if (!varDsc->lvIsStructField && (varDsc->GetStackSlotHomeType() == TYP_INT))
{
storeLoc->gtType = TYP_INT;
con->SetIconValue(ival);
}
}
}
if (storeLoc->OperIs(GT_STORE_LCL_FLD))
{
// We should only encounter this for lclVars that are lvDoNotEnregister.
verifyLclFldDoNotEnregister(storeLoc->GetLclNum());
}
ContainCheckStoreLoc(storeLoc);
}
//------------------------------------------------------------------------
// LowerStoreIndir: Determine whether the operands of an indirect store should be contained.
//
// Arguments:
// node - The indirect store node (GT_STORE_IND) of interest
//
// Return Value:
// None.
//
void Lowering::LowerStoreIndir(GenTreeStoreInd* node)
{
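// Address mode formation (if any) was already attempted by LowerStoreIndirCommon;
// all that remains here is containment analysis.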
ContainCheckStoreIndir(node);
}
//------------------------------------------------------------------------
// LowerMul: Lower a GT_MUL/GT_MULHI/GT_MUL_LONG node.
//
// For ARM64, recognizes GT_MULs that can be turned into GT_MUL_LONGs, as
// those are cheaper. Performs containment checks.
//
// Arguments:
// mul - The node to lower
//
// Return Value:
// The next node to lower.
//
GenTree* Lowering::LowerMul(GenTreeOp* mul)
{
assert(mul->OperIsMul());
#ifdef TARGET_ARM64
if (comp->opts.OptimizationEnabled() && mul->OperIs(GT_MUL) && mul->IsValidLongMul())
{
GenTreeCast* op1 = mul->gtGetOp1()->AsCast();
GenTree* op2 = mul->gtGetOp2();
mul->ClearOverflow();
mul->ClearUnsigned();
if (op1->IsUnsigned())
{
mul->SetUnsigned();
}
mul->gtOp1 = op1->CastOp();
BlockRange().Remove(op1);
if (op2->OperIs(GT_CAST))
{
mul->gtOp2 = op2->AsCast()->CastOp();
BlockRange().Remove(op2);
}
else
{
assert(op2->IsIntegralConst());
assert(FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue()));
op2->ChangeType(TYP_INT);
}
mul->ChangeOper(GT_MUL_LONG);
}
#endif // TARGET_ARM64
ContainCheckMul(mul);
return mul->gtNext;
}
//------------------------------------------------------------------------
// LowerBinaryArithmetic: lowers the given binary arithmetic node.
//
// Arguments:
// node - the arithmetic node to lower
//
// Returns:
// The next node to lower.
//
GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp)
{
if (comp->opts.OptimizationEnabled() && binOp->OperIs(GT_AND))
{
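// Fold "x & ~y" (with the NOT on either operand) into a single GT_AND_NOT node,
// which maps to the "bic" instruction.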
GenTree* opNode = nullptr;
GenTree* notNode = nullptr;
if (binOp->gtGetOp1()->OperIs(GT_NOT))
{
notNode = binOp->gtGetOp1();
opNode = binOp->gtGetOp2();
}
else if (binOp->gtGetOp2()->OperIs(GT_NOT))
{
notNode = binOp->gtGetOp2();
opNode = binOp->gtGetOp1();
}
if (notNode != nullptr)
{
binOp->gtOp1 = opNode;
binOp->gtOp2 = notNode->AsUnOp()->gtGetOp1();
binOp->ChangeOper(GT_AND_NOT);
BlockRange().Remove(notNode);
}
}
ContainCheckBinary(binOp);
return binOp->gtNext;
}
//------------------------------------------------------------------------
// LowerBlockStore: Lower a block store node
//
// Arguments:
// blkNode - The block store node to lower
//
void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
GenTree* src = blkNode->Data();
unsigned size = blkNode->Size();
const bool isDstAddrLocal = dstAddr->OperIsLocalAddr();
if (blkNode->OperIsInitBlkOp())
{
if (src->OperIs(GT_INIT_VAL))
{
src->SetContained();
src = src->AsUnOp()->gtGetOp1();
}
if (blkNode->OperIs(GT_STORE_OBJ))
{
blkNode->SetOper(GT_STORE_BLK);
}
unsigned initBlockUnrollLimit = INITBLK_UNROLL_LIMIT;
#ifdef TARGET_ARM64
if (isDstAddrLocal)
{
// Since dstAddr points to the stack CodeGen can use more optimal
// quad-word store SIMD instructions for InitBlock.
initBlockUnrollLimit = INITBLK_LCL_UNROLL_LIMIT;
}
#endif
if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= initBlockUnrollLimit) && src->OperIs(GT_CNS_INT))
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
// The fill value of an initblk is interpreted to hold a
// value of (unsigned int8); however, a constant of any size
// may practically reside on the evaluation stack. So extract
// the lower byte out of the initVal constant and replicate
// it to a larger constant whose size is sufficient to support
// the largest width store of the desired inline expansion.
ssize_t fill = src->AsIntCon()->IconValue() & 0xFF;
if (fill == 0)
{
#ifdef TARGET_ARM64
// On ARM64 we can just use REG_ZR instead of having to load
// the constant into a real register like on ARM32.
src->SetContained();
#endif
}
#ifdef TARGET_ARM64
else if (size >= REGSIZE_BYTES)
{
fill *= 0x0101010101010101LL;
src->gtType = TYP_LONG;
}
#endif
else
{
fill *= 0x01010101;
}
src->AsIntCon()->SetIconValue(fill);
ContainBlockStoreAddress(blkNode, size, dstAddr);
}
else
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
}
}
else
{
assert(src->OperIs(GT_IND, GT_LCL_VAR, GT_LCL_FLD));
src->SetContained();
bool isSrcAddrLocal = false;
if (src->OperIs(GT_IND))
{
GenTree* srcAddr = src->AsIndir()->Addr();
// TODO-Cleanup: Make sure that GT_IND lowering didn't mark the source address as contained.
// Sometimes the GT_IND type is a non-struct type and then GT_IND lowering may contain the
// address, not knowing that GT_IND is part of a block op that has containment restrictions.
srcAddr->ClearContained();
isSrcAddrLocal = srcAddr->OperIsLocalAddr();
}
else
{
isSrcAddrLocal = true;
if (src->OperIs(GT_LCL_VAR))
{
// TODO-1stClassStructs: for now we can't work with STORE_BLOCK source in register.
const unsigned srcLclNum = src->AsLclVar()->GetLclNum();
comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::BlockOp));
}
}
unsigned copyBlockUnrollLimit = CPBLK_UNROLL_LIMIT;
#ifdef TARGET_ARM64
if (isSrcAddrLocal && isDstAddrLocal)
{
// Since both srcAddr and dstAddr point to the stack CodeGen can use more optimal
// quad-word load and store SIMD instructions for CopyBlock.
copyBlockUnrollLimit = CPBLK_LCL_UNROLL_LIMIT;
}
#endif
if (blkNode->OperIs(GT_STORE_OBJ))
{
if (!blkNode->AsObj()->GetLayout()->HasGCPtr())
{
blkNode->SetOper(GT_STORE_BLK);
}
else if (isDstAddrLocal && (size <= copyBlockUnrollLimit))
{
// If the size is small enough to unroll then we need to mark the block as non-interruptible
// to actually allow unrolling. The generated code does not report GC references loaded in the
// temporary register(s) used for copying.
blkNode->SetOper(GT_STORE_BLK);
blkNode->gtBlkOpGcUnsafe = true;
}
}
if (blkNode->OperIs(GT_STORE_OBJ))
{
assert((dstAddr->TypeGet() == TYP_BYREF) || (dstAddr->TypeGet() == TYP_I_IMPL));
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
}
else if (blkNode->OperIs(GT_STORE_BLK) && (size <= copyBlockUnrollLimit))
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
if (src->OperIs(GT_IND))
{
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr());
}
ContainBlockStoreAddress(blkNode, size, dstAddr);
}
else
{
assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
}
}
}
//------------------------------------------------------------------------
// ContainBlockStoreAddress: Attempt to contain an address used by an unrolled block store.
//
// Arguments:
// blkNode - the block store node
// size - the block size
// addr - the address node to try to contain
//
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr)
{
assert(blkNode->OperIs(GT_STORE_BLK) && (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll));
assert(size < INT32_MAX);
if (addr->OperIsLocalAddr())
{
addr->SetContained();
return;
}
if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->OperIs(GT_CNS_INT))
{
return;
}
GenTreeIntCon* offsetNode = addr->AsOp()->gtGetOp2()->AsIntCon();
ssize_t offset = offsetNode->IconValue();
#ifdef TARGET_ARM
// All integer load/store instructions on Arm support offsets in range -255..255.
// Of course, this is a rather conservative check.
if ((offset < -255) || (offset > 255) || (offset + static_cast<int>(size) > 256))
{
return;
}
#endif // TARGET_ARM
if (!IsSafeToContainMem(blkNode, addr))
{
return;
}
BlockRange().Remove(offsetNode);
addr->ChangeOper(GT_LEA);
addr->AsAddrMode()->SetIndex(nullptr);
addr->AsAddrMode()->SetScale(0);
addr->AsAddrMode()->SetOffset(static_cast<int>(offset));
addr->SetContained();
}
//------------------------------------------------------------------------
// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
//
// Arguments:
// tree - GT_CAST node to be lowered
//
// Return Value:
// None.
//
// Notes:
// Casts from float/double to a smaller int type are transformed as follows:
// GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte)
// GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte)
// GT_CAST(float/double, int16) = GT_CAST(GT_CAST(float/double, int32), int16)
// GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(float/double, int32), uint16)
//
// Note that for the overflow conversions we still depend on helper calls and
// don't expect to see them here.
// i) GT_CAST(float/double, int type with overflow detection)
//
void Lowering::LowerCast(GenTree* tree)
{
assert(tree->OperGet() == GT_CAST);
JITDUMP("LowerCast for: ");
DISPNODE(tree);
JITDUMP("\n");
GenTree* op1 = tree->AsOp()->gtOp1;
var_types dstType = tree->CastToType();
var_types srcType = genActualType(op1->TypeGet());
if (varTypeIsFloating(srcType))
{
noway_assert(!tree->gtOverflow());
assert(!varTypeIsSmall(dstType)); // fgMorphCast creates intermediate casts when converting from float to small
// int.
}
assert(!varTypeIsSmall(srcType));
// Now determine if we have operands that should be contained.
ContainCheckCast(tree->AsCast());
}
//------------------------------------------------------------------------
// LowerRotate: Lower GT_ROL and GT_ROR nodes.
//
// Arguments:
// tree - the node to lower
//
// Return Value:
// None.
//
void Lowering::LowerRotate(GenTree* tree)
{
if (tree->OperGet() == GT_ROL)
{
// There is no ROL instruction on ARM. Convert ROL into ROR.
GenTree* rotatedValue = tree->AsOp()->gtOp1;
unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
GenTree* rotateLeftIndexNode = tree->AsOp()->gtOp2;
if (rotateLeftIndexNode->IsCnsIntOrI())
{
ssize_t rotateLeftIndex = rotateLeftIndexNode->AsIntCon()->gtIconVal;
ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
rotateLeftIndexNode->AsIntCon()->gtIconVal = rotateRightIndex;
}
else
{
GenTree* tmp = comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
tree->AsOp()->gtOp2 = tmp;
}
tree->ChangeOper(GT_ROR);
}
ContainCheckShiftRotate(tree->AsOp());
}
#ifdef FEATURE_HW_INTRINSICS
//----------------------------------------------------------------------------------------------
// LowerHWIntrinsicFusedMultiplyAddScalar: Lowers AdvSimd_FusedMultiplyAddScalar intrinsics
// when some of the operands are negated by "containing" such negation.
//
// Arguments:
// node - The original hardware intrinsic node
//
// | op1 | op2 | op3 |
// | + | + | + | AdvSimd_FusedMultiplyAddScalar
// | + | + | - | AdvSimd_FusedMultiplySubtractScalar
// | + | - | + | AdvSimd_FusedMultiplySubtractScalar
// | + | - | - | AdvSimd_FusedMultiplyAddScalar
// | - | + | + | AdvSimd_FusedMultiplySubtractNegatedScalar
// | - | + | - | AdvSimd_FusedMultiplyAddNegatedScalar
// | - | - | + | AdvSimd_FusedMultiplyAddNegatedScalar
// | - | - | - | AdvSimd_FusedMultiplySubtractNegatedScalar
//
void Lowering::LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node)
{
assert(node->GetHWIntrinsicId() == NI_AdvSimd_FusedMultiplyAddScalar);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
auto lowerOperand = [this](GenTree* op) {
bool wasNegated = false;
if (op->OperIsHWIntrinsic() &&
((op->AsHWIntrinsic()->GetHWIntrinsicId() == NI_AdvSimd_Arm64_DuplicateToVector64) ||
(op->AsHWIntrinsic()->GetHWIntrinsicId() == NI_Vector64_CreateScalarUnsafe)))
{
GenTreeHWIntrinsic* createVector64 = op->AsHWIntrinsic();
GenTree* valueOp = createVector64->Op(1);
if (valueOp->OperIs(GT_NEG))
{
createVector64->Op(1) = valueOp->gtGetOp1();
BlockRange().Remove(valueOp);
wasNegated = true;
}
}
return wasNegated;
};
const bool op1WasNegated = lowerOperand(op1);
const bool op2WasNegated = lowerOperand(op2);
const bool op3WasNegated = lowerOperand(op3);
if (op1WasNegated)
{
if (op2WasNegated != op3WasNegated)
{
node->ChangeHWIntrinsicId(NI_AdvSimd_FusedMultiplyAddNegatedScalar);
}
else
{
node->ChangeHWIntrinsicId(NI_AdvSimd_FusedMultiplySubtractNegatedScalar);
}
}
else if (op2WasNegated != op3WasNegated)
{
node->ChangeHWIntrinsicId(NI_AdvSimd_FusedMultiplySubtractScalar);
}
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsic: Lowers a hardware intrinsic node and performs containment analysis.
//
// Arguments:
// node - The hardware intrinsic node.
//
void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
{
assert(node->TypeGet() != TYP_SIMD32);
if (node->TypeGet() == TYP_SIMD12)
{
// A GT_HWINTRINSIC node that needs to produce a TYP_SIMD12 value in fact
// produces a TYP_SIMD16 result
node->gtType = TYP_SIMD16;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
switch (intrinsicId)
{
case NI_Vector64_Create:
case NI_Vector128_Create:
{
// We don't directly support the Vector64.Create or Vector128.Create methods in codegen
// and instead lower them to other intrinsic nodes in LowerHWIntrinsicCreate so we expect
// that the node is modified to either not be a HWIntrinsic node or that it is no longer
// the same intrinsic as when it came in.
LowerHWIntrinsicCreate(node);
assert(!node->OperIsHWIntrinsic() || (node->GetHWIntrinsicId() != intrinsicId));
LowerNode(node);
return;
}
case NI_Vector64_Dot:
case NI_Vector128_Dot:
{
LowerHWIntrinsicDot(node);
return;
}
case NI_Vector64_op_Equality:
case NI_Vector128_op_Equality:
{
LowerHWIntrinsicCmpOp(node, GT_EQ);
return;
}
case NI_Vector64_op_Inequality:
case NI_Vector128_op_Inequality:
{
LowerHWIntrinsicCmpOp(node, GT_NE);
return;
}
case NI_AdvSimd_FusedMultiplyAddScalar:
LowerHWIntrinsicFusedMultiplyAddScalar(node);
break;
default:
break;
}
ContainCheckHWIntrinsic(node);
}
//----------------------------------------------------------------------------------------------
// Lowering::IsValidConstForMovImm: Determines if the given node can be replaced by a mov/fmov immediate instruction
//
// Arguments:
// node - The hardware intrinsic node.
//
// Returns:
// true if the node can be replaced by a mov/fmov immediate instruction; otherwise, false
//
// IMPORTANT:
// This check may end up modifying node->gtOp1 if it is a cast node that can be removed
bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node)
{
assert((node->GetHWIntrinsicId() == NI_Vector64_Create) || (node->GetHWIntrinsicId() == NI_Vector128_Create) ||
(node->GetHWIntrinsicId() == NI_Vector64_CreateScalarUnsafe) ||
(node->GetHWIntrinsicId() == NI_Vector128_CreateScalarUnsafe) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_DuplicateToVector64) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_DuplicateToVector128) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_Arm64_DuplicateToVector64) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_Arm64_DuplicateToVector128));
assert(node->GetOperandCount() == 1);
GenTree* op1 = node->Op(1);
GenTree* castOp = nullptr;
// TODO-Casts: why don't we fold the casts? MinOpts?
if (varTypeIsIntegral(node->GetSimdBaseType()) && op1->OperIs(GT_CAST))
{
// We will sometimes get a cast around a constant value (such as for
// certain long constants) which would block the below containment.
// So we will temporarily check what the cast is from instead so we
// can catch those cases as well.
castOp = op1->AsCast()->CastOp();
op1 = castOp;
}
if (op1->IsCnsIntOrI())
{
const ssize_t dataValue = op1->AsIntCon()->gtIconVal;
if (comp->GetEmitter()->emitIns_valid_imm_for_movi(dataValue, emitActualTypeSize(node->GetSimdBaseType())))
{
if (castOp != nullptr)
{
// We found a containable immediate under
// a cast, so remove the cast from the LIR.
BlockRange().Remove(node->Op(1));
node->Op(1) = op1;
}
return true;
}
}
else if (op1->IsCnsFltOrDbl())
{
assert(varTypeIsFloating(node->GetSimdBaseType()));
assert(castOp == nullptr);
const double dataValue = op1->AsDblCon()->gtDconVal;
return comp->GetEmitter()->emitIns_valid_imm_for_fmov(dataValue);
}
return false;
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsicCmpOp: Lowers a Vector64 or Vector128 comparison intrinsic
//
// Arguments:
// node - The hardware intrinsic node.
// cmpOp - The comparison operation, currently must be GT_EQ or GT_NE
//
void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
var_types simdBaseType = node->GetSimdBaseType();
unsigned simdSize = node->GetSimdSize();
var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector64_op_Equality) || (intrinsicId == NI_Vector64_op_Inequality) ||
(intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality));
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
assert(node->gtType == TYP_BOOL);
assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));
// We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality):
// /--* op2 simd
// /--* op1 simd
// node = * HWINTRINSIC simd T op_Equality
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
// Optimize comparison against Vector64/128<>.Zero via UMAX:
//
// bool eq = v == Vector128<integer>.Zero
//
// to:
//
// bool eq = AdvSimd.Arm64.MaxAcross(v.AsUInt16()).ToScalar() == 0;
//
GenTree* op = nullptr;
GenTree* opZero = nullptr;
if (op1->IsVectorZero())
{
op = op2;
opZero = op1;
}
else if (op2->IsVectorZero())
{
op = op1;
opZero = op2;
}
if (!varTypeIsFloating(simdBaseType) && (op != nullptr))
{
// Use USHORT for V64 and UINT for V128 due to better latency/TP on some CPUs
CorInfoType maxType = (simdSize == 8) ? CORINFO_TYPE_USHORT : CORINFO_TYPE_UINT;
GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op, NI_AdvSimd_Arm64_MaxAcross, maxType, simdSize);
BlockRange().InsertBefore(node, cmp);
LowerNode(cmp);
BlockRange().Remove(opZero);
GenTree* val = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, NI_Vector128_ToScalar, CORINFO_TYPE_UINT, simdSize);
BlockRange().InsertAfter(cmp, val);
LowerNode(val);
GenTree* cmpZeroCns = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(val, cmpZeroCns);
node->ChangeOper(cmpOp);
node->gtType = TYP_INT;
node->AsOp()->gtOp1 = val;
node->AsOp()->gtOp2 = cmpZeroCns;
LowerNodeCC(node, (cmpOp == GT_EQ) ? GenCondition::EQ : GenCondition::NE);
node->gtType = TYP_VOID;
node->ClearUnusedValue();
LowerNode(node);
return;
}
NamedIntrinsic cmpIntrinsic;
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
cmpIntrinsic = NI_AdvSimd_CompareEqual;
break;
}
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
{
cmpIntrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
break;
}
default:
{
unreached();
}
}
GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, cmp);
LowerNode(cmp);
if ((simdBaseType == TYP_FLOAT) && (simdSize == 12))
{
// For TYP_SIMD12 we don't want the upper bits to participate in the comparison. So, we will insert all ones
// into those bits of the result, "as if" the upper bits are equal. Then if all lower bits are equal, we get the
// expected all-ones result, and will get the expected 0's only where there are non-matching bits.
GenTree* idxCns = comp->gtNewIconNode(3, TYP_INT);
BlockRange().InsertAfter(cmp, idxCns);
GenTree* insCns = comp->gtNewIconNode(-1, TYP_INT);
BlockRange().InsertAfter(idxCns, insCns);
GenTree* tmp = comp->gtNewSimdHWIntrinsicNode(simdType, cmp, idxCns, insCns, NI_AdvSimd_Insert,
CORINFO_TYPE_INT, simdSize);
BlockRange().InsertAfter(insCns, tmp);
LowerNode(tmp);
cmp = tmp;
}
GenTree* msk =
comp->gtNewSimdHWIntrinsicNode(simdType, cmp, NI_AdvSimd_Arm64_MinAcross, CORINFO_TYPE_UBYTE, simdSize);
BlockRange().InsertAfter(cmp, msk);
LowerNode(msk);
GenTree* zroCns = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(msk, zroCns);
GenTree* val =
comp->gtNewSimdHWIntrinsicNode(TYP_UBYTE, msk, zroCns, NI_AdvSimd_Extract, CORINFO_TYPE_UBYTE, simdSize);
BlockRange().InsertAfter(zroCns, val);
LowerNode(val);
zroCns = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(val, zroCns);
node->ChangeOper(cmpOp);
node->gtType = TYP_INT;
node->AsOp()->gtOp1 = val;
node->AsOp()->gtOp2 = zroCns;
// The CompareEqual will set (condition is true) or clear (condition is false) all bits of the respective element
// The MinAcross then ensures we get either all bits set (all conditions are true) or clear (any condition is false)
// So, we need to invert the condition from the operation since we compare against zero
GenCondition cmpCnd = (cmpOp == GT_EQ) ? GenCondition::NE : GenCondition::EQ;
GenTree* cc = LowerNodeCC(node, cmpCnd);
node->gtType = TYP_VOID;
node->ClearUnusedValue();
LowerNode(node);
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsicCreate: Lowers a Vector64 or Vector128 Create call
//
// Performs the following transformations:
// 1. If all the arguments are constant (including the broadcast case), the vector
// will be loaded from the data section, or turned into Zero/AllBitsSet, if possible.
// 2. Non-constant broadcasts (argCnt == 1) are turned into DuplicateToVector intrinsics.
// 3. Remaining cases get a chain of "Insert"s, from the second element to the last, where
// the vector to be inserted into is created with CreateUnsafeScalar from the first element.
//
// Arguments:
// node - The hardware intrinsic node.
//
void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
var_types simdType = node->TypeGet();
CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
var_types simdBaseType = node->GetSimdBaseType();
unsigned simdSize = node->GetSimdSize();
VectorConstant vecCns = {};
if ((simdSize == 8) && (simdType == TYP_DOUBLE))
{
// TODO-Cleanup: Struct retyping means we have the wrong type here. We need to
// manually fix it up so the simdType checks below are correct.
simdType = TYP_SIMD8;
}
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
size_t argCnt = node->GetOperandCount();
size_t cnsArgCnt = 0;
// These intrinsics are meant to set the same value to every element.
if ((argCnt == 1) && HandleArgForHWIntrinsicCreate(node->Op(1), 0, vecCns, simdBaseType))
{
// Now assign the rest of the arguments.
for (unsigned i = 1; i < simdSize / genTypeSize(simdBaseType); i++)
{
HandleArgForHWIntrinsicCreate(node->Op(1), i, vecCns, simdBaseType);
}
cnsArgCnt = 1;
}
else
{
for (unsigned i = 1; i <= argCnt; i++)
{
if (HandleArgForHWIntrinsicCreate(node->Op(i), i - 1, vecCns, simdBaseType))
{
cnsArgCnt++;
}
}
}
assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(simdBaseType))));
// Check if we have a cast that we can remove. Note that "IsValidConstForMovImm"
// will reset Op(1) if it finds such a cast, so we do not need to handle it here.
// TODO-Casts: why are casts from constants checked for here?
if ((argCnt == cnsArgCnt) && (argCnt == 1) && IsValidConstForMovImm(node))
{
// Set the cnsArgCnt to zero so we get lowered to a DuplicateToVector
// intrinsic, which will itself mark the node as contained.
cnsArgCnt = 0;
}
if (argCnt == cnsArgCnt)
{
for (GenTree* arg : node->Operands())
{
BlockRange().Remove(arg);
}
assert((simdSize == 8) || (simdSize == 16));
if (VectorConstantIsBroadcastedI64(vecCns, simdSize / 8))
{
// If we are a single constant or if all parts are the same, we might be able to optimize
// this even further for certain values, such as Zero or AllBitsSet.
if (vecCns.i64[0] == 0)
{
node->ResetHWIntrinsicId((simdSize == 8) ? NI_Vector64_get_Zero : NI_Vector128_get_Zero);
return;
}
else if (vecCns.i64[0] == -1)
{
node->ResetHWIntrinsicId((simdSize == 8) ? NI_Vector64_get_AllBitsSet : NI_Vector128_get_AllBitsSet);
return;
}
}
unsigned cnsSize = (simdSize == 12) ? 16 : simdSize;
unsigned cnsAlign = cnsSize;
var_types dataType = Compiler::getSIMDTypeForSize(simdSize);
UNATIVE_OFFSET cnum = comp->GetEmitter()->emitDataConst(&vecCns, cnsSize, cnsAlign, dataType);
CORINFO_FIELD_HANDLE hnd = comp->eeFindJitDataOffs(cnum);
GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr);
BlockRange().InsertBefore(node, clsVarAddr);
node->ChangeOper(GT_IND);
node->AsOp()->gtOp1 = clsVarAddr;
// TODO-ARM64-CQ: We should be able to modify at least the paths that use Insert to trivially support partial
// vector constants. With this, we can create a constant if say 50% of the inputs are also constant and just
// insert the non-constant values which should still allow some gains.
return;
}
else if (argCnt == 1)
{
// We have the following (where simd is simd8 or simd16):
// /--* op1 T
// node = * HWINTRINSIC simd T Create
// We will be constructing the following parts:
// /--* op1 T
// node = * HWINTRINSIC simd T DuplicateToVector
// This is roughly the following managed code:
// return AdvSimd.Arm64.DuplicateToVector(op1);
if (varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE))
{
node->ChangeHWIntrinsicId((simdType == TYP_SIMD8) ? NI_AdvSimd_Arm64_DuplicateToVector64
: NI_AdvSimd_Arm64_DuplicateToVector128);
}
else
{
node->ChangeHWIntrinsicId((simdType == TYP_SIMD8) ? NI_AdvSimd_DuplicateToVector64
: NI_AdvSimd_DuplicateToVector128);
}
return;
}
// We have the following (where simd is simd8 or simd16):
// /--* op1 T
// +--* ... T
// +--* opN T
// node = * HWINTRINSIC simd T Create
// We will be constructing the following parts:
// /--* op1 T
// tmp1 = * HWINTRINSIC simd8 T CreateScalarUnsafe
// ...
// This is roughly the following managed code:
// var tmp1 = Vector64.CreateScalarUnsafe(op1);
// ...
NamedIntrinsic createScalar =
(simdType == TYP_SIMD8) ? NI_Vector64_CreateScalarUnsafe : NI_Vector128_CreateScalarUnsafe;
GenTree* tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, node->Op(1), createScalar, simdBaseJitType, simdSize);
BlockRange().InsertAfter(node->Op(1), tmp1);
LowerNode(tmp1);
// We will be constructing the following parts:
// ...
// idx = CNS_INT int N
// /--* tmp1 simd
// +--* idx int
// +--* opN T
// tmp1 = * HWINTRINSIC simd T Insert
// ...
// This is roughly the following managed code:
// ...
// tmp1 = AdvSimd.Insert(tmp1, N, opN);
// ...
unsigned N = 0;
GenTree* opN = nullptr;
GenTree* idx = nullptr;
for (N = 1; N < argCnt - 1; N++)
{
opN = node->Op(N + 1);
idx = comp->gtNewIconNode(N);
BlockRange().InsertBefore(opN, idx);
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, idx, opN, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(opN, tmp1);
LowerNode(tmp1);
}
assert(N == (argCnt - 1));
// For the last insert, we will reuse the existing node and so handle it here, outside the loop.
opN = node->Op(argCnt);
idx = comp->gtNewIconNode(N);
BlockRange().InsertBefore(opN, idx);
node->ResetHWIntrinsicId(NI_AdvSimd_Insert, comp, tmp1, idx, opN);
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsicDot: Lowers a Vector64 or Vector128 Dot call
//
// Arguments:
// node - The hardware intrinsic node.
//
void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
var_types simdBaseType = node->GetSimdBaseType();
unsigned simdSize = node->GetSimdSize();
var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector64_Dot) || (intrinsicId == NI_Vector128_Dot));
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
// Spare GenTrees to be used for the lowering logic below
// Defined upfront to avoid naming conflicts, etc...
GenTree* idx = nullptr;
GenTree* tmp1 = nullptr;
GenTree* tmp2 = nullptr;
if (simdSize == 12)
{
assert(simdBaseType == TYP_FLOAT);
// For 12 byte SIMD, we need to clear the upper 4 bytes:
// idx = CNS_INT int 0x03
// tmp1 = * CNS_DLB float 0.0
// /--* op1 simd16
// +--* idx int
// +--* tmp1 simd16
// op1 = * HWINTRINSIC simd16 T Insert
// ...
// This is roughly the following managed code:
// op1 = AdvSimd.Insert(op1, 0x03, 0.0f);
// ...
idx = comp->gtNewIconNode(0x03, TYP_INT);
BlockRange().InsertAfter(op1, idx);
tmp1 = comp->gtNewZeroConNode(TYP_FLOAT);
BlockRange().InsertAfter(idx, tmp1);
LowerNode(tmp1);
op1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, idx, tmp1, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp1, op1);
LowerNode(op1);
idx = comp->gtNewIconNode(0x03, TYP_INT);
BlockRange().InsertAfter(op2, idx);
tmp2 = comp->gtNewZeroConNode(TYP_FLOAT);
BlockRange().InsertAfter(idx, tmp2);
LowerNode(tmp2);
op2 = comp->gtNewSimdHWIntrinsicNode(simdType, op2, idx, tmp2, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, op2);
LowerNode(op2);
}
// We will be constructing the following parts:
// ...
// /--* op1 simd16
// +--* op2 simd16
// tmp1 = * HWINTRINSIC simd16 T Multiply
// ...
// This is roughly the following managed code:
// ...
// var tmp1 = AdvSimd.Multiply(op1, op2);
// ...
NamedIntrinsic multiply = NI_AdvSimd_Multiply;
if (simdBaseType == TYP_DOUBLE)
{
multiply = (simdSize == 8) ? NI_AdvSimd_MultiplyScalar : NI_AdvSimd_Arm64_Multiply;
}
assert(!varTypeIsLong(simdBaseType));
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp1);
LowerNode(tmp1);
if (varTypeIsFloating(simdBaseType))
{
if ((simdSize != 8) || (simdBaseType == TYP_FLOAT))
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// * STORE_LCL_VAR simd16
// tmp1 = LCL_VAR simd16
// tmp2 = LCL_VAR simd16
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = tmp1;
// ...
node->Op(1) = tmp1;
LIR::Use tmp1Use(BlockRange(), &node->Op(1), node);
ReplaceWithLclVar(tmp1Use);
tmp1 = node->Op(1);
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
}
if (simdSize == 8)
{
if (simdBaseType == TYP_FLOAT)
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd8
// +--* tmp2 simd8
// tmp1 = * HWINTRINSIC simd8 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp1 = AdvSimd.AddPairwise(tmp1, tmp2);
// ...
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseJitType,
simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
}
else
{
// No pairs to add for double, as it's a single element
}
}
else
{
assert((simdSize == 12) || (simdSize == 16));
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// +--* tmp2 simd16
// tmp2 = * HWINTRINSIC simd16 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp1 = AdvSimd.Arm64.AddPairwise(tmp1, tmp2);
// ...
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
if (simdBaseType == TYP_FLOAT)
{
// Float needs an additional pairwise add to finish summing the parts
// The first will have summed e0 with e1 and e2 with e3 and then repeats that for the upper half
// So, we will have a vector that looks like this:
// < e0 + e1, e2 + e3, e0 + e1, e2 + e3>
// Doing a second horizontal add with itself will then give us
// e0 + e1 + e2 + e3 in all elements of the vector
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// * STORE_LCL_VAR simd16
// tmp1 = LCL_VAR simd16
// tmp2 = LCL_VAR simd16
// /--* tmp1 simd16
// +--* tmp2 simd16
// tmp2 = * HWINTRINSIC simd16 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = tmp1;
// var tmp1 = AdvSimd.Arm64.AddPairwise(tmp1, tmp2);
// ...
node->Op(1) = tmp1;
LIR::Use tmp1Use(BlockRange(), &node->Op(1), node);
ReplaceWithLclVar(tmp1Use);
tmp1 = node->Op(1);
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise,
simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
}
}
tmp2 = tmp1;
}
else
{
assert(varTypeIsIntegral(simdBaseType));
if ((simdSize == 8) && ((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)))
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// * STORE_LCL_VAR simd16
// tmp1 = LCL_VAR simd16
// tmp2 = LCL_VAR simd16
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = tmp1;
// ...
node->Op(1) = tmp1;
LIR::Use tmp1Use(BlockRange(), &node->Op(1), node);
ReplaceWithLclVar(tmp1Use);
tmp1 = node->Op(1);
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// /--* tmp2 simd16
// tmp2 = * HWINTRINSIC simd8 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = AdvSimd.AddPairwise(tmp1, tmp2);
// ...
tmp1 =
comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
tmp2 = tmp1;
}
else
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// tmp2 = * HWINTRINSIC simd16 T AddAcross
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = AdvSimd.Arm64.AddAcross(tmp1);
// ...
tmp2 =
comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp1, tmp2);
LowerNode(tmp2);
}
}
// We will be constructing the following parts:
// ...
// /--* tmp2 simd16
// node = * HWINTRINSIC simd16 T ToScalar
// This is roughly the following managed code:
// ...
// return tmp2.ToScalar();
node->ResetHWIntrinsicId((simdSize == 8) ? NI_Vector64_ToScalar : NI_Vector128_ToScalar, tmp2);
LowerNode(node);
return;
}
#endif // FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// Containment analysis
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// ContainCheckCallOperands: Determine whether operands of a call should be contained.
//
// Arguments:
// call - The call node of interest
//
// Return Value:
// None.
//
void Lowering::ContainCheckCallOperands(GenTreeCall* call)
{
// There are no contained operands for ARM/ARM64.
}
//------------------------------------------------------------------------
// ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node)
{
#ifdef TARGET_ARM64
GenTree* src = node->Data();
if (src->IsIntegralConst(0))
{
// an integer zero for 'src' can be contained.
MakeSrcContained(node, src);
}
#endif // TARGET_ARM64
ContainCheckIndir(node);
}
//------------------------------------------------------------------------
// ContainCheckIndir: Determine whether operands of an indir should be contained.
//
// Arguments:
// indirNode - The indirection node of interest
//
// Notes:
// This is called for both store and load indirections.
//
// Return Value:
// None.
//
void Lowering::ContainCheckIndir(GenTreeIndir* indirNode)
{
// If this is the rhs of a block copy it will be handled when we handle the store.
if (indirNode->TypeGet() == TYP_STRUCT)
{
return;
}
#ifdef FEATURE_SIMD
// If indirTree is of TYP_SIMD12, don't mark addr as contained
// so that it always get computed to a register. This would
// mean codegen side logic doesn't need to handle all possible
// addr expressions that could be contained.
//
// TODO-ARM64-CQ: handle other addr mode expressions that could be marked
// as contained.
if (indirNode->TypeGet() == TYP_SIMD12)
{
return;
}
#endif // FEATURE_SIMD
GenTree* addr = indirNode->Addr();
if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirNode, addr))
{
bool makeContained = true;
#ifdef TARGET_ARM
// ARM floating-point load/store doesn't support a form similar to integer
// ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported
// form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm.
GenTreeAddrMode* lea = addr->AsAddrMode();
int cns = lea->Offset();
if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns))
{
if (indirNode->OperGet() == GT_STOREIND)
{
if (varTypeIsFloating(indirNode->AsStoreInd()->Data()))
{
makeContained = false;
}
}
else if (indirNode->OperGet() == GT_IND)
{
if (varTypeIsFloating(indirNode))
{
makeContained = false;
}
}
}
#endif // TARGET_ARM
if (makeContained)
{
MakeSrcContained(indirNode, addr);
}
}
else if (addr->OperIs(GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR))
{
// These nodes go into an addr mode:
// - GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR is a stack addr mode.
MakeSrcContained(indirNode, addr);
}
#ifdef TARGET_ARM64
else if (addr->OperIs(GT_CLS_VAR_ADDR))
{
// These nodes go into an addr mode:
// - GT_CLS_VAR_ADDR turns into a constant.
// make this contained, it turns into a constant that goes into an addr mode
MakeSrcContained(indirNode, addr);
}
#endif // TARGET_ARM64
}
//------------------------------------------------------------------------
// ContainCheckBinary: Determine whether a binary op's operands should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckBinary(GenTreeOp* node)
{
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
// Check and make op2 contained (if it is a containable immediate)
CheckImmedAndMakeContained(node, op2);
#ifdef TARGET_ARM64
if (comp->opts.OptimizationEnabled() && varTypeIsIntegral(node) && !node->isContained())
{
// Find "a * b + c" or "c + a * b" in order to emit MADD/MSUB
if (node->OperIs(GT_ADD) && !node->gtOverflow() && (op1->OperIs(GT_MUL) || op2->OperIs(GT_MUL)))
{
GenTree* mul;
GenTree* c;
if (op1->OperIs(GT_MUL))
{
mul = op1;
c = op2;
}
else
{
mul = op2;
c = op1;
}
GenTree* a = mul->gtGetOp1();
GenTree* b = mul->gtGetOp2();
if (!mul->isContained() && !mul->gtOverflow() && !a->isContained() && !b->isContained() &&
!c->isContained() && varTypeIsIntegral(mul))
{
if (a->OperIs(GT_NEG) && !a->gtGetOp1()->isContained() && !a->gtGetOp1()->IsRegOptional())
{
// "-a * b + c" to MSUB
MakeSrcContained(mul, a);
}
if (b->OperIs(GT_NEG) && !b->gtGetOp1()->isContained())
{
// "a * -b + c" to MSUB
MakeSrcContained(mul, b);
}
// If both 'a' and 'b' are GT_NEG - MADD will be emitted.
node->ChangeOper(GT_MADD);
MakeSrcContained(node, mul);
}
}
// Find "a - b * c" in order to emit MSUB
else if (node->OperIs(GT_SUB) && !node->gtOverflow() && op2->OperIs(GT_MUL) && !op2->isContained() &&
!op2->gtOverflow() && varTypeIsIntegral(op2))
{
GenTree* a = op1;
GenTree* b = op2->gtGetOp1();
GenTree* c = op2->gtGetOp2();
if (!a->isContained() && !b->isContained() && !c->isContained())
{
node->ChangeOper(GT_MSUB);
MakeSrcContained(node, op2);
}
}
}
// Change ADD to ADDEX for ADD(X, CAST(Y)) or ADD(CAST(X), Y) where CAST is int->long
// or for ADD(LSH(Y, CNS), X) or ADD(X, LSH(Y, CNS)) where CNS is in the (0..typeWidth) range
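// Illustrative sketch (registers are hypothetical): "x + (long)i" for a signed int i can be
// emitted as "add x0, x0, w1, SXTW", and "x + (y << 2)" as "add x0, x0, x1, LSL #2", folding
// the widening cast or the small left shift into the extended/shifted-register ADD.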
if (node->OperIs(GT_ADD) && !op1->isContained() && !op2->isContained() && varTypeIsIntegral(node) &&
!node->gtOverflow())
{
assert(!node->isContained());
if (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST))
{
GenTree* cast = op1->OperIs(GT_CAST) ? op1 : op2;
if (cast->gtGetOp1()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && !cast->gtOverflow())
{
node->ChangeOper(GT_ADDEX);
MakeSrcContained(node, cast);
}
}
else if (op1->OperIs(GT_LSH) || op2->OperIs(GT_LSH))
{
GenTree* lsh = op1->OperIs(GT_LSH) ? op1 : op2;
GenTree* shiftBy = lsh->gtGetOp2();
if (shiftBy->IsCnsIntOrI())
{
const ssize_t shiftByCns = shiftBy->AsIntCon()->IconValue();
const ssize_t maxShift = (ssize_t)genTypeSize(node) * BITS_IN_BYTE;
if ((shiftByCns > 0) && (shiftByCns < maxShift))
{
// shiftBy is small so it has to be contained at this point.
assert(shiftBy->isContained());
node->ChangeOper(GT_ADDEX);
MakeSrcContained(node, lsh);
}
}
}
}
#endif
}
//------------------------------------------------------------------------
// ContainCheckMul: Determine whether a mul op's operands should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckMul(GenTreeOp* node)
{
ContainCheckBinary(node);
}
//------------------------------------------------------------------------
// ContainCheckDivOrMod: determine which operands of a div/mod should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckDivOrMod(GenTreeOp* node)
{
assert(node->OperIs(GT_DIV, GT_UDIV));
// ARM doesn't have a div instruction with an immediate operand
}
//------------------------------------------------------------------------
// ContainCheckShiftRotate: Determine whether the operands of a shift/rotate op should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
{
GenTree* shiftBy = node->gtOp2;
assert(node->OperIsShiftOrRotate());
#ifdef TARGET_ARM
GenTree* source = node->gtOp1;
if (node->OperIs(GT_LSH_HI, GT_RSH_LO))
{
assert(source->OperGet() == GT_LONG);
MakeSrcContained(node, source);
}
#endif // TARGET_ARM
if (shiftBy->IsCnsIntOrI())
{
MakeSrcContained(node, shiftBy);
}
}
//------------------------------------------------------------------------
// ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const
{
assert(storeLoc->OperIsLocalStore());
GenTree* op1 = storeLoc->gtGetOp1();
if (op1->OperIs(GT_BITCAST))
{
// If we know that the source of the bitcast will be in a register, then we can make
// the bitcast itself contained. This will allow us to store directly from the other
// type if this node doesn't get a register.
GenTree* bitCastSrc = op1->gtGetOp1();
if (!bitCastSrc->isContained() && !bitCastSrc->IsRegOptional())
{
op1->SetContained();
return;
}
}
const LclVarDsc* varDsc = comp->lvaGetDesc(storeLoc);
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(storeLoc))
{
// If this is a store to memory, we can initialize a zero vector in memory from REG_ZR.
if ((op1->IsIntegralConst(0) || op1->IsSIMDZero()) && varDsc->lvDoNotEnregister)
{
MakeSrcContained(storeLoc, op1);
if (op1->IsSIMDZero())
{
MakeSrcContained(op1, op1->AsSIMD()->Op(1));
}
}
return;
}
#endif // FEATURE_SIMD
#ifdef TARGET_ARM64
if (IsContainableImmed(storeLoc, op1))
{
MakeSrcContained(storeLoc, op1);
}
#else
// If the source is a containable immediate, make it contained, unless it is
// an int-size or larger store of zero to memory, because we can generate smaller code
// by zeroing a register and then storing it.
var_types type = varDsc->GetRegisterType(storeLoc);
if (IsContainableImmed(storeLoc, op1) && (!op1->IsIntegralConst(0) || varTypeIsSmall(type)))
{
MakeSrcContained(storeLoc, op1);
}
else if (op1->OperGet() == GT_LONG)
{
MakeSrcContained(storeLoc, op1);
}
#endif // TARGET_ARM64
}
//------------------------------------------------------------------------
// ContainCheckCast: determine whether the source of a CAST node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckCast(GenTreeCast* node)
{
#ifdef TARGET_ARM
GenTree* castOp = node->CastOp();
var_types castToType = node->CastToType();
var_types srcType = castOp->TypeGet();
if (varTypeIsLong(castOp))
{
assert(castOp->OperGet() == GT_LONG);
MakeSrcContained(node, castOp);
}
#endif // TARGET_ARM
}
//------------------------------------------------------------------------
// ContainCheckCompare: determine whether the sources of a compare node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckCompare(GenTreeOp* cmp)
{
CheckImmedAndMakeContained(cmp, cmp->gtOp2);
}
//------------------------------------------------------------------------
// ContainCheckBoundsChk: determine whether any source of a bounds check node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node)
{
assert(node->OperIs(GT_BOUNDS_CHECK));
if (!CheckImmedAndMakeContained(node, node->GetIndex()))
{
CheckImmedAndMakeContained(node, node->GetArrayLength());
}
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------------------
// ContainCheckSIMD: Perform containment analysis for a SIMD intrinsic node.
//
// Arguments:
// simdNode - The SIMD intrinsic node.
//
void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
{
switch (simdNode->GetSIMDIntrinsicId())
{
case SIMDIntrinsicInit:
{
GenTree* op1 = simdNode->Op(1);
if (op1->IsIntegralConst(0))
{
MakeSrcContained(simdNode, op1);
}
break;
}
case SIMDIntrinsicInitArray:
// We have an array and an index, which may be contained.
CheckImmedAndMakeContained(simdNode, simdNode->Op(2));
break;
default:
break;
}
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
//----------------------------------------------------------------------------------------------
// ContainCheckHWIntrinsic: Perform containment analysis for a hardware intrinsic node.
//
// Arguments:
// node - The hardware intrinsic node.
//
void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
{
const HWIntrinsic intrin(node);
const bool hasImmediateOperand = HWIntrinsicInfo::HasImmediateOperand(intrin.id);
if ((intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate) ||
((intrin.category == HW_Category_SIMDByIndexedElement) && hasImmediateOperand))
{
switch (intrin.numOperands)
{
case 4:
assert(varTypeIsIntegral(intrin.op4));
if (intrin.op4->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op4);
}
break;
case 3:
assert(varTypeIsIntegral(intrin.op3));
if (intrin.op3->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op3);
}
break;
case 2:
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
}
break;
default:
unreached();
}
}
else if (hasImmediateOperand || HWIntrinsicInfo::SupportsContainment(intrin.id))
{
switch (intrin.id)
{
case NI_AdvSimd_DuplicateSelectedScalarToVector64:
case NI_AdvSimd_DuplicateSelectedScalarToVector128:
case NI_AdvSimd_Extract:
case NI_AdvSimd_InsertScalar:
case NI_AdvSimd_LoadAndInsertScalar:
case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128:
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
}
break;
case NI_AdvSimd_ExtractVector64:
case NI_AdvSimd_ExtractVector128:
case NI_AdvSimd_StoreSelectedScalar:
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op3));
if (intrin.op3->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op3);
}
break;
case NI_AdvSimd_Insert:
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
if ((intrin.op2->AsIntCon()->gtIconVal == 0) && intrin.op3->IsCnsFltOrDbl())
{
assert(varTypeIsFloating(intrin.baseType));
const double dataValue = intrin.op3->AsDblCon()->gtDconVal;
if (comp->GetEmitter()->emitIns_valid_imm_for_fmov(dataValue))
{
MakeSrcContained(node, intrin.op3);
}
}
}
break;
case NI_AdvSimd_Arm64_InsertSelectedScalar:
assert(hasImmediateOperand);
assert(intrin.op2->IsCnsIntOrI());
assert(intrin.op4->IsCnsIntOrI());
MakeSrcContained(node, intrin.op2);
MakeSrcContained(node, intrin.op4);
break;
case NI_AdvSimd_CompareEqual:
case NI_AdvSimd_Arm64_CompareEqual:
case NI_AdvSimd_Arm64_CompareEqualScalar:
{
if (intrin.op1->IsVectorZero())
{
GenTree* op1 = intrin.op1;
GenTree* op2 = intrin.op2;
assert(HWIntrinsicInfo::IsCommutative(intrin.id));
MakeSrcContained(node, op1);
// Swap the operands here to make the containment checks in codegen simpler
node->Op(1) = op2;
node->Op(2) = op1;
}
else if (intrin.op2->IsVectorZero())
{
MakeSrcContained(node, intrin.op2);
}
break;
}
case NI_AdvSimd_CompareGreaterThan:
case NI_AdvSimd_CompareGreaterThanOrEqual:
case NI_AdvSimd_Arm64_CompareGreaterThan:
case NI_AdvSimd_Arm64_CompareGreaterThanOrEqual:
case NI_AdvSimd_Arm64_CompareGreaterThanScalar:
case NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar:
{
// Containment is not supported for unsigned base types as the corresponding instructions:
// - cmhi
// - cmhs
// require both operands; they do not have a 'compare against zero' form.
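// Illustrative sketch: for a signed base type, CompareGreaterThan(v, Vector128<int>.Zero) can
// use the immediate form "cmgt v0.4s, v1.4s, #0" with the zero operand contained, whereas
// cmhi/cmhs have no "#0" variant, so for unsigned types the zero vector stays in a register.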
if (intrin.op2->IsVectorZero() && !varTypeIsUnsigned(intrin.baseType))
{
MakeSrcContained(node, intrin.op2);
}
break;
}
case NI_Vector64_CreateScalarUnsafe:
case NI_Vector128_CreateScalarUnsafe:
case NI_AdvSimd_DuplicateToVector64:
case NI_AdvSimd_DuplicateToVector128:
case NI_AdvSimd_Arm64_DuplicateToVector64:
case NI_AdvSimd_Arm64_DuplicateToVector128:
if (IsValidConstForMovImm(node))
{
MakeSrcContained(node, node->Op(1));
}
break;
case NI_Vector64_GetElement:
case NI_Vector128_GetElement:
{
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
}
if (IsContainableMemoryOp(intrin.op1))
{
MakeSrcContained(node, intrin.op1);
if (intrin.op1->OperIs(GT_IND))
{
intrin.op1->AsIndir()->Addr()->ClearContained();
}
}
break;
}
default:
unreached();
}
}
else if ((intrin.id == NI_AdvSimd_LoadVector128) || (intrin.id == NI_AdvSimd_LoadVector64))
{
assert(intrin.numOperands == 1);
assert(HWIntrinsicInfo::lookupCategory(intrin.id) == HW_Category_MemoryLoad);
GenTree* addr = node->Op(1);
if (TryCreateAddrMode(addr, true, node) && IsSafeToContainMem(node, addr))
{
assert(addr->OperIs(GT_LEA));
MakeSrcContained(node, addr);
}
}
}
#endif // FEATURE_HW_INTRINSICS
#endif // TARGET_ARMARCH
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Lowering for ARM and ARM64 common code XX
XX XX
XX This encapsulates common logic for lowering trees for the ARM and ARM64 XX
XX architectures. For a more detailed view of what is lowering, please XX
XX take a look at Lower.cpp XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#ifdef TARGET_ARMARCH // This file is ONLY used for ARM and ARM64 architectures
#include "jit.h"
#include "sideeffects.h"
#include "lower.h"
#include "lsra.h"
#ifdef FEATURE_HW_INTRINSICS
#include "hwintrinsic.h"
#endif
//------------------------------------------------------------------------
// IsCallTargetInRange: Can a call target address be encoded in-place?
//
// Return Value:
// True if the addr fits into the range.
//
bool Lowering::IsCallTargetInRange(void* addr)
{
return comp->codeGen->validImmForBL((ssize_t)addr);
}
//------------------------------------------------------------------------
// IsContainableImmed: Is an immediate encodable in-place?
//
// Return Value:
// True if the immediate can be folded into an instruction,
// for example small enough and non-relocatable.
//
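// Illustrative sketch (values are hypothetical): in "x + 100" the constant is encodable as an
// add immediate and can be contained, whereas "x + 0x12345678" is not encodable, so that
// constant must be materialized into a register first.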
bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
{
if (!varTypeIsFloating(parentNode->TypeGet()))
{
#ifdef TARGET_ARM64
if (parentNode->OperIsCompare() && childNode->IsFPZero())
{
// Contain 0.0 constant in fcmp on arm64
// TODO: Enable for arm too (vcmp)
// We currently don't emit these for floating points
assert(!parentNode->OperIs(GT_TEST_EQ, GT_TEST_NE));
return true;
}
#endif
// Make sure we have an actual immediate
if (!childNode->IsCnsIntOrI())
return false;
if (childNode->AsIntCon()->ImmedValNeedsReloc(comp))
return false;
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntCon::gtIconVal had target_ssize_t type.
target_ssize_t immVal = (target_ssize_t)childNode->AsIntCon()->gtIconVal;
emitAttr attr = emitActualTypeSize(childNode->TypeGet());
emitAttr size = EA_SIZE(attr);
#ifdef TARGET_ARM
insFlags flags = parentNode->gtSetFlags() ? INS_FLAGS_SET : INS_FLAGS_DONT_CARE;
#endif
switch (parentNode->OperGet())
{
case GT_ADD:
case GT_SUB:
#ifdef TARGET_ARM64
return emitter::emitIns_valid_imm_for_add(immVal, size);
case GT_CMPXCHG:
case GT_LOCKADD:
case GT_XORR:
case GT_XAND:
case GT_XADD:
return comp->compOpportunisticallyDependsOn(InstructionSet_Atomics)
? false
: emitter::emitIns_valid_imm_for_add(immVal, size);
#elif defined(TARGET_ARM)
return emitter::emitIns_valid_imm_for_add(immVal, flags);
#endif
break;
#ifdef TARGET_ARM64
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_BOUNDS_CHECK:
return emitter::emitIns_valid_imm_for_cmp(immVal, size);
case GT_AND:
case GT_OR:
case GT_XOR:
case GT_TEST_EQ:
case GT_TEST_NE:
return emitter::emitIns_valid_imm_for_alu(immVal, size);
case GT_JCMP:
assert(((parentNode->gtFlags & GTF_JCMP_TST) == 0) ? (immVal == 0) : isPow2(immVal));
return true;
#elif defined(TARGET_ARM)
case GT_EQ:
case GT_NE:
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
case GT_CMP:
case GT_AND:
case GT_OR:
case GT_XOR:
return emitter::emitIns_valid_imm_for_alu(immVal);
#endif // TARGET_ARM
#ifdef TARGET_ARM64
case GT_STORE_LCL_FLD:
case GT_STORE_LCL_VAR:
if (immVal == 0)
return true;
break;
#endif
default:
break;
}
}
return false;
}
//------------------------------------------------------------------------
// LowerStoreLoc: Lower a store of a lclVar
//
// Arguments:
// storeLoc - the local store (GT_STORE_LCL_FLD or GT_STORE_LCL_VAR)
//
// Notes:
// This involves:
//    - Widening small constant stores to TYP_INT when the local's stack slot home is int-sized.
//
void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
GenTree* op1 = storeLoc->gtGetOp1();
if ((storeLoc->gtOper == GT_STORE_LCL_VAR) && (op1->gtOper == GT_CNS_INT))
{
// Try to widen the ops if they are going into a local var.
GenTreeIntCon* con = op1->AsIntCon();
ssize_t ival = con->gtIconVal;
unsigned varNum = storeLoc->GetLclNum();
LclVarDsc* varDsc = comp->lvaGetDesc(varNum);
if (varDsc->lvIsSIMDType())
{
noway_assert(storeLoc->gtType != TYP_STRUCT);
}
unsigned size = genTypeSize(storeLoc);
// If we are storing a constant into a local variable
// we extend the size of the store here
if ((size < 4) && !varTypeIsStruct(varDsc))
{
if (!varTypeIsUnsigned(varDsc))
{
if (genTypeSize(storeLoc) == 1)
{
if ((ival & 0x7f) != ival)
{
ival = ival | 0xffffff00;
}
}
else
{
assert(genTypeSize(storeLoc) == 2);
if ((ival & 0x7fff) != ival)
{
ival = ival | 0xffff0000;
}
}
}
// TODO-CQ: if the field is promoted independently shouldn't we
// also be able to do this?
if (!varDsc->lvIsStructField && (varDsc->GetStackSlotHomeType() == TYP_INT))
{
storeLoc->gtType = TYP_INT;
con->SetIconValue(ival);
}
}
}
if (storeLoc->OperIs(GT_STORE_LCL_FLD))
{
// We should only encounter this for lclVars that are lvDoNotEnregister.
verifyLclFldDoNotEnregister(storeLoc->GetLclNum());
}
ContainCheckStoreLoc(storeLoc);
}
//------------------------------------------------------------------------
// LowerStoreIndir: Determine addressing mode for an indirection, and whether operands are contained.
//
// Arguments:
// node - The indirect store node (GT_STORE_IND) of interest
//
// Return Value:
// None.
//
void Lowering::LowerStoreIndir(GenTreeStoreInd* node)
{
ContainCheckStoreIndir(node);
}
//------------------------------------------------------------------------
// LowerMul: Lower a GT_MUL/GT_MULHI/GT_MUL_LONG node.
//
// For ARM64, recognizes GT_MULs that can be turned into GT_MUL_LONGs, as
// those are cheaper. Performs containment checks.
//
// Arguments:
// mul - The node to lower
//
// Return Value:
// The next node to lower.
//
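// Illustrative sketch: "(long)x * (long)y" with int-typed x and y has both casts stripped and
// becomes a GT_MUL_LONG, which maps to a single smull (or umull for the unsigned case) rather
// than widening both operands and performing a full 64-bit multiply.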
GenTree* Lowering::LowerMul(GenTreeOp* mul)
{
assert(mul->OperIsMul());
#ifdef TARGET_ARM64
if (comp->opts.OptimizationEnabled() && mul->OperIs(GT_MUL) && mul->IsValidLongMul())
{
GenTreeCast* op1 = mul->gtGetOp1()->AsCast();
GenTree* op2 = mul->gtGetOp2();
mul->ClearOverflow();
mul->ClearUnsigned();
if (op1->IsUnsigned())
{
mul->SetUnsigned();
}
mul->gtOp1 = op1->CastOp();
BlockRange().Remove(op1);
if (op2->OperIs(GT_CAST))
{
mul->gtOp2 = op2->AsCast()->CastOp();
BlockRange().Remove(op2);
}
else
{
assert(op2->IsIntegralConst());
assert(FitsIn<int32_t>(op2->AsIntConCommon()->IntegralValue()));
op2->ChangeType(TYP_INT);
}
mul->ChangeOper(GT_MUL_LONG);
}
#endif // TARGET_ARM64
ContainCheckMul(mul);
return mul->gtNext;
}
//------------------------------------------------------------------------
// LowerBinaryArithmetic: lowers the given binary arithmetic node.
//
// Arguments:
// node - the arithmetic node to lower
//
// Returns:
// The next node to lower.
//
GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp)
{
if (comp->opts.OptimizationEnabled() && binOp->OperIs(GT_AND))
{
GenTree* opNode = nullptr;
GenTree* notNode = nullptr;
if (binOp->gtGetOp1()->OperIs(GT_NOT))
{
notNode = binOp->gtGetOp1();
opNode = binOp->gtGetOp2();
}
else if (binOp->gtGetOp2()->OperIs(GT_NOT))
{
notNode = binOp->gtGetOp2();
opNode = binOp->gtGetOp1();
}
if (notNode != nullptr)
{
binOp->gtOp1 = opNode;
binOp->gtOp2 = notNode->AsUnOp()->gtGetOp1();
binOp->ChangeOper(GT_AND_NOT);
BlockRange().Remove(notNode);
}
}
ContainCheckBinary(binOp);
return binOp->gtNext;
}
//------------------------------------------------------------------------
// LowerBlockStore: Lower a block store node
//
// Arguments:
// blkNode - The block store node to lower
//
void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
GenTree* src = blkNode->Data();
unsigned size = blkNode->Size();
const bool isDstAddrLocal = dstAddr->OperIsLocalAddr();
if (blkNode->OperIsInitBlkOp())
{
if (src->OperIs(GT_INIT_VAL))
{
src->SetContained();
src = src->AsUnOp()->gtGetOp1();
}
if (blkNode->OperIs(GT_STORE_OBJ))
{
blkNode->SetOper(GT_STORE_BLK);
}
unsigned initBlockUnrollLimit = INITBLK_UNROLL_LIMIT;
#ifdef TARGET_ARM64
if (isDstAddrLocal)
{
// Since dstAddr points to the stack, CodeGen can use more optimal
// quad-word store SIMD instructions for InitBlock.
initBlockUnrollLimit = INITBLK_LCL_UNROLL_LIMIT;
}
#endif
if (!blkNode->OperIs(GT_STORE_DYN_BLK) && (size <= initBlockUnrollLimit) && src->OperIs(GT_CNS_INT))
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
// The fill value of an initblk is interpreted to hold a
// value of (unsigned int8); however, a constant of any size
// may practically reside on the evaluation stack. So extract
// the lower byte out of the initVal constant and replicate
// it to a larger constant whose size is sufficient to support
// the largest width store of the desired inline expansion.
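// Illustrative sketch: a fill byte of 0xAB for an ARM64 block of at least REGSIZE_BYTES is
// widened to the TYP_LONG constant 0xABABABABABABABAB, while a fill of zero is simply
// contained so the stores can come straight from REG_ZR.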
ssize_t fill = src->AsIntCon()->IconValue() & 0xFF;
if (fill == 0)
{
#ifdef TARGET_ARM64
// On ARM64 we can just use REG_ZR instead of having to load
// the constant into a real register like on ARM32.
src->SetContained();
#endif
}
#ifdef TARGET_ARM64
else if (size >= REGSIZE_BYTES)
{
fill *= 0x0101010101010101LL;
src->gtType = TYP_LONG;
}
#endif
else
{
fill *= 0x01010101;
}
src->AsIntCon()->SetIconValue(fill);
ContainBlockStoreAddress(blkNode, size, dstAddr);
}
else
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
}
}
else
{
assert(src->OperIs(GT_IND, GT_LCL_VAR, GT_LCL_FLD));
src->SetContained();
bool isSrcAddrLocal = false;
if (src->OperIs(GT_IND))
{
GenTree* srcAddr = src->AsIndir()->Addr();
// TODO-Cleanup: Make sure that GT_IND lowering didn't mark the source address as contained.
// Sometimes the GT_IND type is a non-struct type and then GT_IND lowering may contain the
// address, not knowing that GT_IND is part of a block op that has containment restrictions.
srcAddr->ClearContained();
isSrcAddrLocal = srcAddr->OperIsLocalAddr();
}
else
{
isSrcAddrLocal = true;
if (src->OperIs(GT_LCL_VAR))
{
// TODO-1stClassStructs: for now we can't work with STORE_BLOCK source in register.
const unsigned srcLclNum = src->AsLclVar()->GetLclNum();
comp->lvaSetVarDoNotEnregister(srcLclNum DEBUGARG(DoNotEnregisterReason::BlockOp));
}
}
unsigned copyBlockUnrollLimit = CPBLK_UNROLL_LIMIT;
#ifdef TARGET_ARM64
if (isSrcAddrLocal && isDstAddrLocal)
{
// Since both srcAddr and dstAddr point to the stack, CodeGen can use more optimal
// quad-word load and store SIMD instructions for CopyBlock.
copyBlockUnrollLimit = CPBLK_LCL_UNROLL_LIMIT;
}
#endif
if (blkNode->OperIs(GT_STORE_OBJ))
{
if (!blkNode->AsObj()->GetLayout()->HasGCPtr())
{
blkNode->SetOper(GT_STORE_BLK);
}
else if (isDstAddrLocal && (size <= copyBlockUnrollLimit))
{
// If the size is small enough to unroll then we need to mark the block as non-interruptible
// to actually allow unrolling. The generated code does not report GC references loaded in the
// temporary register(s) used for copying.
blkNode->SetOper(GT_STORE_BLK);
blkNode->gtBlkOpGcUnsafe = true;
}
}
if (blkNode->OperIs(GT_STORE_OBJ))
{
assert((dstAddr->TypeGet() == TYP_BYREF) || (dstAddr->TypeGet() == TYP_I_IMPL));
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
}
else if (blkNode->OperIs(GT_STORE_BLK) && (size <= copyBlockUnrollLimit))
{
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
if (src->OperIs(GT_IND))
{
ContainBlockStoreAddress(blkNode, size, src->AsIndir()->Addr());
}
ContainBlockStoreAddress(blkNode, size, dstAddr);
}
else
{
assert(blkNode->OperIs(GT_STORE_BLK, GT_STORE_DYN_BLK));
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindHelper;
}
}
}
//------------------------------------------------------------------------
// ContainBlockStoreAddress: Attempt to contain an address used by an unrolled block store.
//
// Arguments:
// blkNode - the block store node
// size - the block size
// addr - the address node to try to contain
//
void Lowering::ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr)
{
assert(blkNode->OperIs(GT_STORE_BLK) && (blkNode->gtBlkOpKind == GenTreeBlk::BlkOpKindUnroll));
assert(size < INT32_MAX);
if (addr->OperIsLocalAddr())
{
addr->SetContained();
return;
}
if (!addr->OperIs(GT_ADD) || addr->gtOverflow() || !addr->AsOp()->gtGetOp2()->OperIs(GT_CNS_INT))
{
return;
}
GenTreeIntCon* offsetNode = addr->AsOp()->gtGetOp2()->AsIntCon();
ssize_t offset = offsetNode->IconValue();
#ifdef TARGET_ARM
// All integer load/store instructions on Arm support offsets in range -255..255.
// Of course, this is a rather conservative check.
if ((offset < -255) || (offset > 255) || (offset + static_cast<int>(size) > 256))
{
return;
}
#endif // TARGET_ARM
if (!IsSafeToContainMem(blkNode, addr))
{
return;
}
BlockRange().Remove(offsetNode);
addr->ChangeOper(GT_LEA);
addr->AsAddrMode()->SetIndex(nullptr);
addr->AsAddrMode()->SetScale(0);
addr->AsAddrMode()->SetOffset(static_cast<int>(offset));
addr->SetContained();
}
//------------------------------------------------------------------------
// LowerCast: Lower GT_CAST(srcType, DstType) nodes.
//
// Arguments:
// tree - GT_CAST node to be lowered
//
// Return Value:
// None.
//
// Notes:
// Casts from float/double to a smaller int type are transformed as follows:
// GT_CAST(float/double, byte) = GT_CAST(GT_CAST(float/double, int32), byte)
// GT_CAST(float/double, sbyte) = GT_CAST(GT_CAST(float/double, int32), sbyte)
// GT_CAST(float/double, int16) = GT_CAST(GT_CAST(float/double, int32), int16)
// GT_CAST(float/double, uint16) = GT_CAST(GT_CAST(float/double, int32), uint16)
//
// Note that for the overflow conversions we still depend on helper calls and
// don't expect to see them here.
// i) GT_CAST(float/double, int type with overflow detection)
//
void Lowering::LowerCast(GenTree* tree)
{
assert(tree->OperGet() == GT_CAST);
JITDUMP("LowerCast for: ");
DISPNODE(tree);
JITDUMP("\n");
GenTree* op1 = tree->AsOp()->gtOp1;
var_types dstType = tree->CastToType();
var_types srcType = genActualType(op1->TypeGet());
if (varTypeIsFloating(srcType))
{
noway_assert(!tree->gtOverflow());
assert(!varTypeIsSmall(dstType)); // fgMorphCast creates intermediate casts when converting from float to small
// int.
}
assert(!varTypeIsSmall(srcType));
// Now determine if we have operands that should be contained.
ContainCheckCast(tree->AsCast());
}
//------------------------------------------------------------------------
// LowerRotate: Lower GT_ROL and GT_ROR nodes.
//
// Arguments:
// tree - the node to lower
//
// Return Value:
// None.
//
void Lowering::LowerRotate(GenTree* tree)
{
if (tree->OperGet() == GT_ROL)
{
// There is no ROL instruction on ARM. Convert ROL into ROR.
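// Illustrative sketch: a 32-bit rotate-left by the constant 5 becomes a rotate-right by
// 27 (32 - 5); for a non-constant count the index is negated instead, relying on the
// rotate amount being taken modulo the operand width.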
GenTree* rotatedValue = tree->AsOp()->gtOp1;
unsigned rotatedValueBitSize = genTypeSize(rotatedValue->gtType) * 8;
GenTree* rotateLeftIndexNode = tree->AsOp()->gtOp2;
if (rotateLeftIndexNode->IsCnsIntOrI())
{
ssize_t rotateLeftIndex = rotateLeftIndexNode->AsIntCon()->gtIconVal;
ssize_t rotateRightIndex = rotatedValueBitSize - rotateLeftIndex;
rotateLeftIndexNode->AsIntCon()->gtIconVal = rotateRightIndex;
}
else
{
GenTree* tmp = comp->gtNewOperNode(GT_NEG, genActualType(rotateLeftIndexNode->gtType), rotateLeftIndexNode);
BlockRange().InsertAfter(rotateLeftIndexNode, tmp);
tree->AsOp()->gtOp2 = tmp;
}
tree->ChangeOper(GT_ROR);
}
ContainCheckShiftRotate(tree->AsOp());
}
#ifdef TARGET_ARM64
//------------------------------------------------------------------------
// LowerModPow2: Lower GT_MOD if the second operand is a constant power of 2.
//
// Arguments:
// tree - the node to lower
//
// Return Value:
// A new tree node if it changed.
//
// Notes:
// {expr} % {cns}
// Logically turns into:
// let a = {expr}
// if a > 0 then (a & ({cns} - 1)) else -(-a & ({cns} - 1))
// which then turns into:
// and reg1, reg0, #({cns} - 1)
// negs reg0, reg0
// and reg0, reg0, #({cns} - 1)
// csneg reg0, reg1, reg0, mi
// TODO: We could do this optimization in morph but we do not have
// a conditional select op in HIR. At some point, we may
// introduce such an op.
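// Illustrative worked example: for dividend -13 and divisor 8 the mask constant is 7, so
// trueExpr = -13 & 7 = 3 and falseExpr = 13 & 7 = 5; the dividend is negative, so the csneg
// selects -falseExpr and the result is -5, matching -13 % 8.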
GenTree* Lowering::LowerModPow2(GenTree* node)
{
assert(node->OperIs(GT_MOD));
GenTree* mod = node;
GenTree* dividend = mod->gtGetOp1();
GenTree* divisor = mod->gtGetOp2();
assert(divisor->IsIntegralConstPow2());
const var_types type = mod->TypeGet();
assert((type == TYP_INT) || (type == TYP_LONG));
LIR::Use use;
if (!BlockRange().TryGetUse(node, &use))
{
return nullptr;
}
ssize_t cnsValue = static_cast<ssize_t>(divisor->AsIntConCommon()->IntegralValue()) - 1;
BlockRange().Remove(divisor);
// We need to use the dividend node multiple times so its value needs to be
// computed once and stored in a temp variable.
LIR::Use opDividend(BlockRange(), &mod->AsOp()->gtOp1, mod);
dividend = ReplaceWithLclVar(opDividend);
GenTree* dividend2 = comp->gtClone(dividend);
BlockRange().InsertAfter(dividend, dividend2);
GenTreeIntCon* cns = comp->gtNewIconNode(cnsValue, type);
BlockRange().InsertAfter(dividend2, cns);
GenTree* const trueExpr = comp->gtNewOperNode(GT_AND, type, dividend, cns);
BlockRange().InsertAfter(cns, trueExpr);
LowerNode(trueExpr);
GenTree* const neg = comp->gtNewOperNode(GT_NEG, type, dividend2);
neg->gtFlags |= GTF_SET_FLAGS;
BlockRange().InsertAfter(trueExpr, neg);
GenTreeIntCon* cns2 = comp->gtNewIconNode(cnsValue, type);
BlockRange().InsertAfter(neg, cns2);
GenTree* const falseExpr = comp->gtNewOperNode(GT_AND, type, neg, cns2);
BlockRange().InsertAfter(cns2, falseExpr);
LowerNode(falseExpr);
GenTree* const cc = comp->gtNewOperNode(GT_CSNEG_MI, type, trueExpr, falseExpr);
cc->gtFlags |= GTF_USE_FLAGS;
JITDUMP("Lower: optimize X MOD POW2");
DISPNODE(mod);
JITDUMP("to:\n");
DISPNODE(cc);
BlockRange().InsertBefore(mod, cc);
ContainCheckNode(cc);
BlockRange().Remove(mod);
use.ReplaceWith(cc);
return cc->gtNext;
}
#endif
#ifdef FEATURE_HW_INTRINSICS
//----------------------------------------------------------------------------------------------
// LowerHWIntrinsicFusedMultiplyAddScalar: Lowers AdvSimd_FusedMultiplyAddScalar intrinsics
// when some of the operands are negated by "containing" such negation.
//
// Arguments:
// node - The original hardware intrinsic node
//
// | op1 | op2 | op3 |
// | + | + | + | AdvSimd_FusedMultiplyAddScalar
// | + | + | - | AdvSimd_FusedMultiplySubtractScalar
// | + | - | + | AdvSimd_FusedMultiplySubtractScalar
// | + | - | - | AdvSimd_FusedMultiplyAddScalar
// | - | + | + | AdvSimd_FusedMultiplySubtractNegatedScalar
// | - | + | - | AdvSimd_FusedMultiplyAddNegatedScalar
// | - | - | + | AdvSimd_FusedMultiplyAddNegatedScalar
// | - | - | - | AdvSimd_FusedMultiplySubtractNegatedScalar
//
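// Illustrative sketch: if op1 arrives as Vector64.CreateScalarUnsafe(-x) while op2 and op3 are
// not negated, the inner GT_NEG is removed and the node is retargeted to
// AdvSimd_FusedMultiplySubtractNegatedScalar, matching the (-, +, +) row of the table above.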
void Lowering::LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node)
{
assert(node->GetHWIntrinsicId() == NI_AdvSimd_FusedMultiplyAddScalar);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
GenTree* op3 = node->Op(3);
auto lowerOperand = [this](GenTree* op) {
bool wasNegated = false;
if (op->OperIsHWIntrinsic() &&
((op->AsHWIntrinsic()->GetHWIntrinsicId() == NI_AdvSimd_Arm64_DuplicateToVector64) ||
(op->AsHWIntrinsic()->GetHWIntrinsicId() == NI_Vector64_CreateScalarUnsafe)))
{
GenTreeHWIntrinsic* createVector64 = op->AsHWIntrinsic();
GenTree* valueOp = createVector64->Op(1);
if (valueOp->OperIs(GT_NEG))
{
createVector64->Op(1) = valueOp->gtGetOp1();
BlockRange().Remove(valueOp);
wasNegated = true;
}
}
return wasNegated;
};
const bool op1WasNegated = lowerOperand(op1);
const bool op2WasNegated = lowerOperand(op2);
const bool op3WasNegated = lowerOperand(op3);
if (op1WasNegated)
{
if (op2WasNegated != op3WasNegated)
{
node->ChangeHWIntrinsicId(NI_AdvSimd_FusedMultiplyAddNegatedScalar);
}
else
{
node->ChangeHWIntrinsicId(NI_AdvSimd_FusedMultiplySubtractNegatedScalar);
}
}
else if (op2WasNegated != op3WasNegated)
{
node->ChangeHWIntrinsicId(NI_AdvSimd_FusedMultiplySubtractScalar);
}
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsic: Lowers a hardware intrinsic node and performs containment analysis.
//
// Arguments:
// node - The hardware intrinsic node.
//
void Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
{
assert(node->TypeGet() != TYP_SIMD32);
if (node->TypeGet() == TYP_SIMD12)
{
// GT_HWINTRINSIC node requiring to produce TYP_SIMD12 in fact
// produces a TYP_SIMD16 result
node->gtType = TYP_SIMD16;
}
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
switch (intrinsicId)
{
case NI_Vector64_Create:
case NI_Vector128_Create:
{
// We don't directly support the Vector64.Create or Vector128.Create methods in codegen
// and instead lower them to other intrinsic nodes in LowerHWIntrinsicCreate so we expect
// that the node is modified to either not be a HWIntrinsic node or that it is no longer
// the same intrinsic as when it came in.
LowerHWIntrinsicCreate(node);
assert(!node->OperIsHWIntrinsic() || (node->GetHWIntrinsicId() != intrinsicId));
LowerNode(node);
return;
}
case NI_Vector64_Dot:
case NI_Vector128_Dot:
{
LowerHWIntrinsicDot(node);
return;
}
case NI_Vector64_op_Equality:
case NI_Vector128_op_Equality:
{
LowerHWIntrinsicCmpOp(node, GT_EQ);
return;
}
case NI_Vector64_op_Inequality:
case NI_Vector128_op_Inequality:
{
LowerHWIntrinsicCmpOp(node, GT_NE);
return;
}
case NI_AdvSimd_FusedMultiplyAddScalar:
LowerHWIntrinsicFusedMultiplyAddScalar(node);
break;
default:
break;
}
ContainCheckHWIntrinsic(node);
}
//----------------------------------------------------------------------------------------------
// Lowering::IsValidConstForMovImm: Determines if the given node can be replaced by a mov/fmov immediate instruction
//
// Arguments:
// node - The hardware intrinsic node.
//
// Returns:
// true if the node can be replaced by a mov/fmov immediate instruction; otherwise, false
//
// IMPORTANT:
// This check may end up modifying node->gtOp1 if it is a cast node that can be removed
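// Illustrative sketch: Vector128.Create((byte)0x7F) passes this check because 0x7F is encodable
// as a "movi" byte immediate, and Vector128.Create(2.0f) passes because 2.0 is representable as
// an "fmov" immediate, so in both cases the constant operand can be contained.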
bool Lowering::IsValidConstForMovImm(GenTreeHWIntrinsic* node)
{
assert((node->GetHWIntrinsicId() == NI_Vector64_Create) || (node->GetHWIntrinsicId() == NI_Vector128_Create) ||
(node->GetHWIntrinsicId() == NI_Vector64_CreateScalarUnsafe) ||
(node->GetHWIntrinsicId() == NI_Vector128_CreateScalarUnsafe) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_DuplicateToVector64) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_DuplicateToVector128) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_Arm64_DuplicateToVector64) ||
(node->GetHWIntrinsicId() == NI_AdvSimd_Arm64_DuplicateToVector128));
assert(node->GetOperandCount() == 1);
GenTree* op1 = node->Op(1);
GenTree* castOp = nullptr;
// TODO-Casts: why don't we fold the casts? MinOpts?
if (varTypeIsIntegral(node->GetSimdBaseType()) && op1->OperIs(GT_CAST))
{
// We will sometimes get a cast around a constant value (such as for
// certain long constants) which would block the below containment.
// So we will temporarily check what the cast is from instead so we
// can catch those cases as well.
castOp = op1->AsCast()->CastOp();
op1 = castOp;
}
if (op1->IsCnsIntOrI())
{
const ssize_t dataValue = op1->AsIntCon()->gtIconVal;
if (comp->GetEmitter()->emitIns_valid_imm_for_movi(dataValue, emitActualTypeSize(node->GetSimdBaseType())))
{
if (castOp != nullptr)
{
// We found a containable immediate under
// a cast, so remove the cast from the LIR.
BlockRange().Remove(node->Op(1));
node->Op(1) = op1;
}
return true;
}
}
else if (op1->IsCnsFltOrDbl())
{
assert(varTypeIsFloating(node->GetSimdBaseType()));
assert(castOp == nullptr);
const double dataValue = op1->AsDblCon()->gtDconVal;
return comp->GetEmitter()->emitIns_valid_imm_for_fmov(dataValue);
}
return false;
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsicCmpOp: Lowers a Vector64 or Vector128 comparison intrinsic
//
// Arguments:
// node - The hardware intrinsic node.
// cmpOp - The comparison operation, currently must be GT_EQ or GT_NE
//
void Lowering::LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
var_types simdBaseType = node->GetSimdBaseType();
unsigned simdSize = node->GetSimdSize();
var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector64_op_Equality) || (intrinsicId == NI_Vector64_op_Inequality) ||
(intrinsicId == NI_Vector128_op_Equality) || (intrinsicId == NI_Vector128_op_Inequality));
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
assert(node->gtType == TYP_BOOL);
assert((cmpOp == GT_EQ) || (cmpOp == GT_NE));
// We have the following (with the appropriate simd size and where the intrinsic could be op_Inequality):
// /--* op2 simd
// /--* op1 simd
// node = * HWINTRINSIC simd T op_Equality
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
// Optimize comparison against Vector64/128<>.Zero via UMAX:
//
// bool eq = v == Vector128<integer>.Zero
//
// to:
//
// bool eq = AdvSimd.Arm64.MaxAcross(v.AsUInt16()).ToScalar() == 0;
//
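// Illustrative sketch of the expected codegen (registers are hypothetical): the reduction
// becomes roughly an "umaxv" across the vector, a scalar move out of the SIMD register, and a
// "cmp"/"cset" against zero, instead of an element-wise compare followed by a mask reduction.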
GenTree* op = nullptr;
GenTree* opZero = nullptr;
if (op1->IsVectorZero())
{
op = op2;
opZero = op1;
}
else if (op2->IsVectorZero())
{
op = op1;
opZero = op2;
}
if (!varTypeIsFloating(simdBaseType) && (op != nullptr))
{
// Use USHORT for V64 and UINT for V128 due to better latency/TP on some CPUs
CorInfoType maxType = (simdSize == 8) ? CORINFO_TYPE_USHORT : CORINFO_TYPE_UINT;
GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op, NI_AdvSimd_Arm64_MaxAcross, maxType, simdSize);
BlockRange().InsertBefore(node, cmp);
LowerNode(cmp);
BlockRange().Remove(opZero);
GenTree* val = comp->gtNewSimdHWIntrinsicNode(TYP_INT, cmp, NI_Vector128_ToScalar, CORINFO_TYPE_UINT, simdSize);
BlockRange().InsertAfter(cmp, val);
LowerNode(val);
GenTree* cmpZeroCns = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(val, cmpZeroCns);
node->ChangeOper(cmpOp);
node->gtType = TYP_INT;
node->AsOp()->gtOp1 = val;
node->AsOp()->gtOp2 = cmpZeroCns;
LowerNodeCC(node, (cmpOp == GT_EQ) ? GenCondition::EQ : GenCondition::NE);
node->gtType = TYP_VOID;
node->ClearUnusedValue();
LowerNode(node);
return;
}
NamedIntrinsic cmpIntrinsic;
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
case TYP_INT:
case TYP_UINT:
case TYP_FLOAT:
{
cmpIntrinsic = NI_AdvSimd_CompareEqual;
break;
}
case TYP_LONG:
case TYP_ULONG:
case TYP_DOUBLE:
{
cmpIntrinsic = (simdSize == 8) ? NI_AdvSimd_Arm64_CompareEqualScalar : NI_AdvSimd_Arm64_CompareEqual;
break;
}
default:
{
unreached();
}
}
GenTree* cmp = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, cmpIntrinsic, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, cmp);
LowerNode(cmp);
if ((simdBaseType == TYP_FLOAT) && (simdSize == 12))
{
// For TYP_SIMD12 we don't want the upper bits to participate in the comparison. So, we will insert all ones
// into those bits of the result, "as if" the upper bits are equal. Then if all lower bits are equal, we get the
// expected all-ones result, and will get the expected 0's only where there are non-matching bits.
GenTree* idxCns = comp->gtNewIconNode(3, TYP_INT);
BlockRange().InsertAfter(cmp, idxCns);
GenTree* insCns = comp->gtNewIconNode(-1, TYP_INT);
BlockRange().InsertAfter(idxCns, insCns);
GenTree* tmp = comp->gtNewSimdHWIntrinsicNode(simdType, cmp, idxCns, insCns, NI_AdvSimd_Insert,
CORINFO_TYPE_INT, simdSize);
BlockRange().InsertAfter(insCns, tmp);
LowerNode(tmp);
cmp = tmp;
}
GenTree* msk =
comp->gtNewSimdHWIntrinsicNode(simdType, cmp, NI_AdvSimd_Arm64_MinAcross, CORINFO_TYPE_UBYTE, simdSize);
BlockRange().InsertAfter(cmp, msk);
LowerNode(msk);
GenTree* zroCns = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(msk, zroCns);
GenTree* val =
comp->gtNewSimdHWIntrinsicNode(TYP_UBYTE, msk, zroCns, NI_AdvSimd_Extract, CORINFO_TYPE_UBYTE, simdSize);
BlockRange().InsertAfter(zroCns, val);
LowerNode(val);
zroCns = comp->gtNewIconNode(0, TYP_INT);
BlockRange().InsertAfter(val, zroCns);
node->ChangeOper(cmpOp);
node->gtType = TYP_INT;
node->AsOp()->gtOp1 = val;
node->AsOp()->gtOp2 = zroCns;
// The CompareEqual will set (condition is true) or clear (condition is false) all bits of the respective element
// The MinAcross then ensures we get either all bits set (all conditions are true) or clear (any condition is false)
// So, we need to invert the condition from the operation since we compare against zero
GenCondition cmpCnd = (cmpOp == GT_EQ) ? GenCondition::NE : GenCondition::EQ;
GenTree* cc = LowerNodeCC(node, cmpCnd);
node->gtType = TYP_VOID;
node->ClearUnusedValue();
LowerNode(node);
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsicCreate: Lowers a Vector64 or Vector128 Create call
//
// Performs the following transformations:
// 1. If all the arguments are constant (including the broadcast case), the vector
// will be loaded from the data section, or turned into Zero/AllBitsSet, if possible.
// 2. Non-constant broadcasts (argCnt == 1) are turned into DuplicateToVector intrinsics.
// 3. Remaining cases get a chain of "Insert"s, from the second element to the last, where
// the vector to be inserted into is created with CreateUnsafeScalar from the first element.
//
// Arguments:
// node - The hardware intrinsic node.
//
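// Illustrative sketch: Vector128.Create(1f, 2f, 3f, 4f) with all-constant arguments becomes a
// 16-byte data-section constant read back through a GT_IND of a GT_CLS_VAR_ADDR;
// Vector128.Create(x) for a non-constant x becomes a DuplicateToVector128 intrinsic; and
// Vector128.Create(x, y, z, w) becomes CreateScalarUnsafe(x) followed by a chain of
// AdvSimd.Insert calls for the remaining elements.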
void Lowering::LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
var_types simdType = node->TypeGet();
CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
var_types simdBaseType = node->GetSimdBaseType();
unsigned simdSize = node->GetSimdSize();
VectorConstant vecCns = {};
if ((simdSize == 8) && (simdType == TYP_DOUBLE))
{
// TODO-Cleanup: Struct retyping means we have the wrong type here. We need to
// manually fix it up so the simdType checks below are correct.
simdType = TYP_SIMD8;
}
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
size_t argCnt = node->GetOperandCount();
size_t cnsArgCnt = 0;
// These intrinsics are meant to set the same value to every element.
if ((argCnt == 1) && HandleArgForHWIntrinsicCreate(node->Op(1), 0, vecCns, simdBaseType))
{
// Now assign the rest of the arguments.
for (unsigned i = 1; i < simdSize / genTypeSize(simdBaseType); i++)
{
HandleArgForHWIntrinsicCreate(node->Op(1), i, vecCns, simdBaseType);
}
cnsArgCnt = 1;
}
else
{
for (unsigned i = 1; i <= argCnt; i++)
{
if (HandleArgForHWIntrinsicCreate(node->Op(i), i - 1, vecCns, simdBaseType))
{
cnsArgCnt++;
}
}
}
assert((argCnt == 1) || (argCnt == (simdSize / genTypeSize(simdBaseType))));
// Check if we have a cast that we can remove. Note that "IsValidConstForMovImm"
// will reset Op(1) if it finds such a cast, so we do not need to handle it here.
// TODO-Casts: why are casts from constants checked for here?
if ((argCnt == cnsArgCnt) && (argCnt == 1) && IsValidConstForMovImm(node))
{
// Set the cnsArgCnt to zero so we get lowered to a DuplicateToVector
// intrinsic, which will itself mark the node as contained.
cnsArgCnt = 0;
}
if (argCnt == cnsArgCnt)
{
for (GenTree* arg : node->Operands())
{
BlockRange().Remove(arg);
}
assert((simdSize == 8) || (simdSize == 16));
if (VectorConstantIsBroadcastedI64(vecCns, simdSize / 8))
{
// If we are a single constant or if all parts are the same, we might be able to optimize
// this even further for certain values, such as Zero or AllBitsSet.
if (vecCns.i64[0] == 0)
{
node->ResetHWIntrinsicId((simdSize == 8) ? NI_Vector64_get_Zero : NI_Vector128_get_Zero);
return;
}
else if (vecCns.i64[0] == -1)
{
node->ResetHWIntrinsicId((simdSize == 8) ? NI_Vector64_get_AllBitsSet : NI_Vector128_get_AllBitsSet);
return;
}
}
unsigned cnsSize = (simdSize == 12) ? 16 : simdSize;
unsigned cnsAlign = cnsSize;
var_types dataType = Compiler::getSIMDTypeForSize(simdSize);
UNATIVE_OFFSET cnum = comp->GetEmitter()->emitDataConst(&vecCns, cnsSize, cnsAlign, dataType);
CORINFO_FIELD_HANDLE hnd = comp->eeFindJitDataOffs(cnum);
GenTree* clsVarAddr = new (comp, GT_CLS_VAR_ADDR) GenTreeClsVar(GT_CLS_VAR_ADDR, TYP_I_IMPL, hnd, nullptr);
BlockRange().InsertBefore(node, clsVarAddr);
node->ChangeOper(GT_IND);
node->AsOp()->gtOp1 = clsVarAddr;
// TODO-ARM64-CQ: We should be able to modify at least the paths that use Insert to trivially support partial
// vector constants. With this, we can create a constant if say 50% of the inputs are also constant and just
// insert the non-constant values which should still allow some gains.
return;
}
else if (argCnt == 1)
{
// We have the following (where simd is simd8 or simd16):
// /--* op1 T
// node = * HWINTRINSIC simd T Create
// We will be constructing the following parts:
// /--* op1 T
// node = * HWINTRINSIC simd T DuplicateToVector
// This is roughly the following managed code:
// return AdvSimd.Arm64.DuplicateToVector(op1);
if (varTypeIsLong(simdBaseType) || (simdBaseType == TYP_DOUBLE))
{
node->ChangeHWIntrinsicId((simdType == TYP_SIMD8) ? NI_AdvSimd_Arm64_DuplicateToVector64
: NI_AdvSimd_Arm64_DuplicateToVector128);
}
else
{
node->ChangeHWIntrinsicId((simdType == TYP_SIMD8) ? NI_AdvSimd_DuplicateToVector64
: NI_AdvSimd_DuplicateToVector128);
}
return;
}
// We have the following (where simd is simd8 or simd16):
// /--* op1 T
// +--* ... T
// +--* opN T
// node = * HWINTRINSIC simd T Create
// We will be constructing the following parts:
// /--* op1 T
// tmp1 = * HWINTRINSIC simd8 T CreateScalarUnsafe
// ...
// This is roughly the following managed code:
// var tmp1 = Vector64.CreateScalarUnsafe(op1);
// ...
NamedIntrinsic createScalar =
(simdType == TYP_SIMD8) ? NI_Vector64_CreateScalarUnsafe : NI_Vector128_CreateScalarUnsafe;
GenTree* tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, node->Op(1), createScalar, simdBaseJitType, simdSize);
BlockRange().InsertAfter(node->Op(1), tmp1);
LowerNode(tmp1);
// We will be constructing the following parts:
// ...
// idx = CNS_INT int N
// /--* tmp1 simd
// +--* idx int
// +--* opN T
// tmp1 = * HWINTRINSIC simd T Insert
// ...
// This is roughly the following managed code:
// ...
// tmp1 = AdvSimd.Insert(tmp1, N, opN);
// ...
unsigned N = 0;
GenTree* opN = nullptr;
GenTree* idx = nullptr;
for (N = 1; N < argCnt - 1; N++)
{
opN = node->Op(N + 1);
idx = comp->gtNewIconNode(N);
BlockRange().InsertBefore(opN, idx);
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, idx, opN, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(opN, tmp1);
LowerNode(tmp1);
}
assert(N == (argCnt - 1));
// For the last insert, we will reuse the existing node and so handle it here, outside the loop.
opN = node->Op(argCnt);
idx = comp->gtNewIconNode(N);
BlockRange().InsertBefore(opN, idx);
node->ResetHWIntrinsicId(NI_AdvSimd_Insert, comp, tmp1, idx, opN);
}
//----------------------------------------------------------------------------------------------
// Lowering::LowerHWIntrinsicDot: Lowers a Vector64 or Vector128 Dot call
//
// Arguments:
// node - The hardware intrinsic node.
//
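// Illustrative sketch: Vector128.Dot(a, b) over float lowers to AdvSimd.Multiply(a, b) followed
// by pairwise adds (AddPairwise) and a final ToScalar, while most integral base types sum the
// per-element products with a single AddAcross before the ToScalar.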
void Lowering::LowerHWIntrinsicDot(GenTreeHWIntrinsic* node)
{
NamedIntrinsic intrinsicId = node->GetHWIntrinsicId();
CorInfoType simdBaseJitType = node->GetSimdBaseJitType();
var_types simdBaseType = node->GetSimdBaseType();
unsigned simdSize = node->GetSimdSize();
var_types simdType = Compiler::getSIMDTypeForSize(simdSize);
assert((intrinsicId == NI_Vector64_Dot) || (intrinsicId == NI_Vector128_Dot));
assert(varTypeIsSIMD(simdType));
assert(varTypeIsArithmetic(simdBaseType));
assert(simdSize != 0);
GenTree* op1 = node->Op(1);
GenTree* op2 = node->Op(2);
// Spare GenTrees to be used for the lowering logic below
// Defined upfront to avoid naming conflicts, etc...
GenTree* idx = nullptr;
GenTree* tmp1 = nullptr;
GenTree* tmp2 = nullptr;
if (simdSize == 12)
{
assert(simdBaseType == TYP_FLOAT);
// For 12 byte SIMD, we need to clear the upper 4 bytes:
// idx = CNS_INT int 0x03
// tmp1 = * CNS_DLB float 0.0
// /--* op1 simd16
// +--* idx int
// +--* tmp1 simd16
// op1 = * HWINTRINSIC simd16 T Insert
// ...
// This is roughly the following managed code:
// op1 = AdvSimd.Insert(op1, 0x03, 0.0f);
// ...
idx = comp->gtNewIconNode(0x03, TYP_INT);
BlockRange().InsertAfter(op1, idx);
tmp1 = comp->gtNewZeroConNode(TYP_FLOAT);
BlockRange().InsertAfter(idx, tmp1);
LowerNode(tmp1);
op1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, idx, tmp1, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp1, op1);
LowerNode(op1);
idx = comp->gtNewIconNode(0x03, TYP_INT);
BlockRange().InsertAfter(op2, idx);
tmp2 = comp->gtNewZeroConNode(TYP_FLOAT);
BlockRange().InsertAfter(idx, tmp2);
LowerNode(tmp2);
op2 = comp->gtNewSimdHWIntrinsicNode(simdType, op2, idx, tmp2, NI_AdvSimd_Insert, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, op2);
LowerNode(op2);
}
// We will be constructing the following parts:
// ...
// /--* op1 simd16
// +--* op2 simd16
// tmp1 = * HWINTRINSIC simd16 T Multiply
// ...
// This is roughly the following managed code:
// ...
// var tmp1 = AdvSimd.Multiply(op1, op2);
// ...
NamedIntrinsic multiply = NI_AdvSimd_Multiply;
if (simdBaseType == TYP_DOUBLE)
{
multiply = (simdSize == 8) ? NI_AdvSimd_MultiplyScalar : NI_AdvSimd_Arm64_Multiply;
}
assert(!varTypeIsLong(simdBaseType));
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, op1, op2, multiply, simdBaseJitType, simdSize);
BlockRange().InsertBefore(node, tmp1);
LowerNode(tmp1);
if (varTypeIsFloating(simdBaseType))
{
if ((simdSize != 8) || (simdBaseType == TYP_FLOAT))
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// * STORE_LCL_VAR simd16
// tmp1 = LCL_VAR simd16
// tmp2 = LCL_VAR simd16
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = tmp1;
// ...
node->Op(1) = tmp1;
LIR::Use tmp1Use(BlockRange(), &node->Op(1), node);
ReplaceWithLclVar(tmp1Use);
tmp1 = node->Op(1);
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
}
if (simdSize == 8)
{
if (simdBaseType == TYP_FLOAT)
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd8
// +--* tmp2 simd8
// tmp1 = * HWINTRINSIC simd8 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp1 = AdvSimd.AddPairwise(tmp1, tmp2);
// ...
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseJitType,
simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
}
else
{
// No pairs to add for double, as it's a single element
}
}
else
{
assert((simdSize == 12) || (simdSize == 16));
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// +--* tmp2 simd16
// tmp2 = * HWINTRINSIC simd16 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp1 = AdvSimd.Arm64.AddPairwise(tmp1, tmp2);
// ...
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise, simdBaseJitType,
simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
if (simdBaseType == TYP_FLOAT)
{
// Float needs an additional pairwise add to finish summing the parts
// The first will have summed e0 with e1 and e2 with e3 and then repeats that for the upper half
// So, we will have a vector that looks like this:
// < e0 + e1, e2 + e3, e0 + e1, e2 + e3>
// Doing a second horizontal add with itself will then give us
// e0 + e1 + e2 + e3 in all elements of the vector
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// * STORE_LCL_VAR simd16
// tmp1 = LCL_VAR simd16
// tmp2 = LCL_VAR simd16
// /--* tmp1 simd16
// +--* tmp2 simd16
// tmp2 = * HWINTRINSIC simd16 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = tmp1;
// var tmp1 = AdvSimd.Arm64.AddPairwise(tmp1, tmp2);
// ...
node->Op(1) = tmp1;
LIR::Use tmp1Use(BlockRange(), &node->Op(1), node);
ReplaceWithLclVar(tmp1Use);
tmp1 = node->Op(1);
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
tmp1 = comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_Arm64_AddPairwise,
simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
}
}
tmp2 = tmp1;
}
else
{
assert(varTypeIsIntegral(simdBaseType));
if ((simdSize == 8) && ((simdBaseType == TYP_INT) || (simdBaseType == TYP_UINT)))
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// * STORE_LCL_VAR simd16
// tmp1 = LCL_VAR simd16
// tmp2 = LCL_VAR simd16
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = tmp1;
// ...
node->Op(1) = tmp1;
LIR::Use tmp1Use(BlockRange(), &node->Op(1), node);
ReplaceWithLclVar(tmp1Use);
tmp1 = node->Op(1);
tmp2 = comp->gtClone(tmp1);
BlockRange().InsertAfter(tmp1, tmp2);
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// /--* tmp2 simd16
// tmp2 = * HWINTRINSIC simd8 T AddPairwise
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = AdvSimd.AddPairwise(tmp1, tmp2);
// ...
tmp1 =
comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, tmp2, NI_AdvSimd_AddPairwise, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp2, tmp1);
LowerNode(tmp1);
tmp2 = tmp1;
}
else
{
// We will be constructing the following parts:
// ...
// /--* tmp1 simd16
// tmp2 = * HWINTRINSIC simd16 T AddAcross
// ...
// This is roughly the following managed code:
// ...
// var tmp2 = AdvSimd.Arm64.AddAcross(tmp1);
// ...
tmp2 =
comp->gtNewSimdHWIntrinsicNode(simdType, tmp1, NI_AdvSimd_Arm64_AddAcross, simdBaseJitType, simdSize);
BlockRange().InsertAfter(tmp1, tmp2);
LowerNode(tmp2);
}
}
// We will be constructing the following parts:
// ...
// /--* tmp2 simd16
// node = * HWINTRINSIC simd16 T ToScalar
// This is roughly the following managed code:
// ...
// return tmp2.ToScalar();
node->ResetHWIntrinsicId((simdSize == 8) ? NI_Vector64_ToScalar : NI_Vector128_ToScalar, tmp2);
LowerNode(node);
return;
}
#endif // FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// Containment analysis
//------------------------------------------------------------------------
//------------------------------------------------------------------------
// ContainCheckCallOperands: Determine whether operands of a call should be contained.
//
// Arguments:
// call - The call node of interest
//
// Return Value:
// None.
//
void Lowering::ContainCheckCallOperands(GenTreeCall* call)
{
// There are no contained operands for arm.
}
//------------------------------------------------------------------------
// ContainCheckStoreIndir: determine whether the sources of a STOREIND node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckStoreIndir(GenTreeStoreInd* node)
{
#ifdef TARGET_ARM64
GenTree* src = node->Data();
if (src->IsIntegralConst(0))
{
// an integer zero for 'src' can be contained.
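// For example, codegen can then emit "str wzr, [x0]" / "str xzr, [x0]" directly
// instead of first materializing zero in a register (operands illustrative).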
MakeSrcContained(node, src);
}
#endif // TARGET_ARM64
ContainCheckIndir(node);
}
//------------------------------------------------------------------------
// ContainCheckIndir: Determine whether operands of an indir should be contained.
//
// Arguments:
// indirNode - The indirection node of interest
//
// Notes:
// This is called for both store and load indirections.
//
// Return Value:
// None.
//
void Lowering::ContainCheckIndir(GenTreeIndir* indirNode)
{
// If this is the rhs of a block copy it will be handled when we handle the store.
if (indirNode->TypeGet() == TYP_STRUCT)
{
return;
}
#ifdef FEATURE_SIMD
// If indirTree is of TYP_SIMD12, don't mark addr as contained
// so that it always gets computed into a register. This would
// mean codegen side logic doesn't need to handle all possible
// addr expressions that could be contained.
//
// TODO-ARM64-CQ: handle other addr mode expressions that could be marked
// as contained.
if (indirNode->TypeGet() == TYP_SIMD12)
{
return;
}
#endif // FEATURE_SIMD
GenTree* addr = indirNode->Addr();
if ((addr->OperGet() == GT_LEA) && IsSafeToContainMem(indirNode, addr))
{
bool makeContained = true;
#ifdef TARGET_ARM
// ARM floating-point load/store doesn't support a form similar to integer
// ldr Rdst, [Rbase + Roffset] with offset in a register. The only supported
// form is vldr Rdst, [Rbase + imm] with a more limited constraint on the imm.
GenTreeAddrMode* lea = addr->AsAddrMode();
int cns = lea->Offset();
if (lea->HasIndex() || !emitter::emitIns_valid_imm_for_vldst_offset(cns))
{
if (indirNode->OperGet() == GT_STOREIND)
{
if (varTypeIsFloating(indirNode->AsStoreInd()->Data()))
{
makeContained = false;
}
}
else if (indirNode->OperGet() == GT_IND)
{
if (varTypeIsFloating(indirNode))
{
makeContained = false;
}
}
}
#endif // TARGET_ARM
if (makeContained)
{
MakeSrcContained(indirNode, addr);
}
}
else if (addr->OperIs(GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR))
{
// These nodes go into an addr mode:
// - GT_LCL_VAR_ADDR, GT_LCL_FLD_ADDR is a stack addr mode.
MakeSrcContained(indirNode, addr);
}
#ifdef TARGET_ARM64
else if (addr->OperIs(GT_CLS_VAR_ADDR))
{
// These nodes go into an addr mode:
// - GT_CLS_VAR_ADDR turns into a constant.
// make this contained, it turns into a constant that goes into an addr mode
MakeSrcContained(indirNode, addr);
}
#endif // TARGET_ARM64
}
//------------------------------------------------------------------------
// ContainCheckBinary: Determine whether a binary op's operands should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckBinary(GenTreeOp* node)
{
GenTree* op1 = node->gtGetOp1();
GenTree* op2 = node->gtGetOp2();
// Check and make op2 contained (if it is a containable immediate)
CheckImmedAndMakeContained(node, op2);
#ifdef TARGET_ARM64
if (comp->opts.OptimizationEnabled() && varTypeIsIntegral(node) && !node->isContained())
{
// Find "a * b + c" or "c + a * b" in order to emit MADD/MSUB
if (node->OperIs(GT_ADD) && !node->gtOverflow() && (op1->OperIs(GT_MUL) || op2->OperIs(GT_MUL)))
{
GenTree* mul;
GenTree* c;
if (op1->OperIs(GT_MUL))
{
mul = op1;
c = op2;
}
else
{
mul = op2;
c = op1;
}
GenTree* a = mul->gtGetOp1();
GenTree* b = mul->gtGetOp2();
if (!mul->isContained() && !mul->gtOverflow() && !a->isContained() && !b->isContained() &&
!c->isContained() && varTypeIsIntegral(mul))
{
if (a->OperIs(GT_NEG) && !a->gtGetOp1()->isContained() && !a->gtGetOp1()->IsRegOptional())
{
// "-a * b + c" to MSUB
MakeSrcContained(mul, a);
}
if (b->OperIs(GT_NEG) && !b->gtGetOp1()->isContained())
{
// "a * -b + c" to MSUB
MakeSrcContained(mul, b);
}
// If both 'a' and 'b' are GT_NEG - MADD will be emitted.
node->ChangeOper(GT_MADD);
MakeSrcContained(node, mul);
}
}
// Find "a - b * c" in order to emit MSUB
else if (node->OperIs(GT_SUB) && !node->gtOverflow() && op2->OperIs(GT_MUL) && !op2->isContained() &&
!op2->gtOverflow() && varTypeIsIntegral(op2))
{
GenTree* a = op1;
GenTree* b = op2->gtGetOp1();
GenTree* c = op2->gtGetOp2();
if (!a->isContained() && !b->isContained() && !c->isContained())
{
node->ChangeOper(GT_MSUB);
MakeSrcContained(node, op2);
}
}
}
// Change ADD TO ADDEX for ADD(X, CAST(Y)) or ADD(CAST(X), Y) where CAST is int->long
// or for ADD(LSH(X, CNS), X) or ADD(X, LSH(X, CNS)) where CNS is in the (0..typeWidth) range
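// For example, "x + (long)y" can use the extended-register form "add x0, x0, w1, sxtw",
// and "x + (y << 3)" can use the shifted-register form "add x0, x0, x1, lsl #3"
// (register assignments illustrative).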
if (node->OperIs(GT_ADD) && !op1->isContained() && !op2->isContained() && varTypeIsIntegral(node) &&
!node->gtOverflow())
{
assert(!node->isContained());
if (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST))
{
GenTree* cast = op1->OperIs(GT_CAST) ? op1 : op2;
if (cast->gtGetOp1()->TypeIs(TYP_INT) && cast->TypeIs(TYP_LONG) && !cast->gtOverflow())
{
node->ChangeOper(GT_ADDEX);
MakeSrcContained(node, cast);
}
}
else if (op1->OperIs(GT_LSH) || op2->OperIs(GT_LSH))
{
GenTree* lsh = op1->OperIs(GT_LSH) ? op1 : op2;
GenTree* shiftBy = lsh->gtGetOp2();
if (shiftBy->IsCnsIntOrI())
{
const ssize_t shiftByCns = shiftBy->AsIntCon()->IconValue();
const ssize_t maxShift = (ssize_t)genTypeSize(node) * BITS_IN_BYTE;
if ((shiftByCns > 0) && (shiftByCns < maxShift))
{
// shiftBy is small so it has to be contained at this point.
assert(shiftBy->isContained());
node->ChangeOper(GT_ADDEX);
MakeSrcContained(node, lsh);
}
}
}
}
#endif
}
//------------------------------------------------------------------------
// ContainCheckMul: Determine whether a mul op's operands should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckMul(GenTreeOp* node)
{
ContainCheckBinary(node);
}
//------------------------------------------------------------------------
// ContainCheckDivOrMod: determine which operands of a div/mod should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckDivOrMod(GenTreeOp* node)
{
assert(node->OperIs(GT_DIV, GT_UDIV, GT_MOD));
// ARM doesn't have a div instruction with an immediate operand
}
//------------------------------------------------------------------------
// ContainCheckShiftRotate: Determine whether a shift or rotate op's operands should be contained.
//
// Arguments:
// node - the node we care about
//
void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
{
GenTree* shiftBy = node->gtOp2;
assert(node->OperIsShiftOrRotate());
#ifdef TARGET_ARM
GenTree* source = node->gtOp1;
if (node->OperIs(GT_LSH_HI, GT_RSH_LO))
{
assert(source->OperGet() == GT_LONG);
MakeSrcContained(node, source);
}
#endif // TARGET_ARM
if (shiftBy->IsCnsIntOrI())
{
MakeSrcContained(node, shiftBy);
}
}
//------------------------------------------------------------------------
// ContainCheckStoreLoc: determine whether the source of a STORE_LCL* should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const
{
assert(storeLoc->OperIsLocalStore());
GenTree* op1 = storeLoc->gtGetOp1();
if (op1->OperIs(GT_BITCAST))
{
// If we know that the source of the bitcast will be in a register, then we can make
// the bitcast itself contained. This will allow us to store directly from the other
// type if this node doesn't get a register.
GenTree* bitCastSrc = op1->gtGetOp1();
if (!bitCastSrc->isContained() && !bitCastSrc->IsRegOptional())
{
op1->SetContained();
return;
}
}
const LclVarDsc* varDsc = comp->lvaGetDesc(storeLoc);
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(storeLoc))
{
// If this is a store to memory, we can initialize a zero vector in memory from REG_ZR.
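// For example, a contained zero for a 16-byte vector spilled to the stack can be
// written as "stp xzr, xzr, [fp, #off]" (offset illustrative).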
if ((op1->IsIntegralConst(0) || op1->IsSIMDZero()) && varDsc->lvDoNotEnregister)
{
MakeSrcContained(storeLoc, op1);
if (op1->IsSIMDZero())
{
MakeSrcContained(op1, op1->AsSIMD()->Op(1));
}
}
return;
}
#endif // FEATURE_SIMD
#ifdef TARGET_ARM64
if (IsContainableImmed(storeLoc, op1))
{
MakeSrcContained(storeLoc, op1);
}
#else
// If the source is a containable immediate, make it contained, unless it is
// an int-size or larger store of zero to memory, because we can generate smaller code
// by zeroing a register and then storing it.
var_types type = varDsc->GetRegisterType(storeLoc);
if (IsContainableImmed(storeLoc, op1) && (!op1->IsIntegralConst(0) || varTypeIsSmall(type)))
{
MakeSrcContained(storeLoc, op1);
}
else if (op1->OperGet() == GT_LONG)
{
MakeSrcContained(storeLoc, op1);
}
#endif // TARGET_ARM64
}
//------------------------------------------------------------------------
// ContainCheckCast: determine whether the source of a CAST node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckCast(GenTreeCast* node)
{
#ifdef TARGET_ARM
GenTree* castOp = node->CastOp();
var_types castToType = node->CastToType();
var_types srcType = castOp->TypeGet();
if (varTypeIsLong(castOp))
{
assert(castOp->OperGet() == GT_LONG);
MakeSrcContained(node, castOp);
}
#endif // TARGET_ARM
}
//------------------------------------------------------------------------
// ContainCheckCompare: determine whether the sources of a compare node should be contained.
//
// Arguments:
// cmp - pointer to the compare node
//
void Lowering::ContainCheckCompare(GenTreeOp* cmp)
{
CheckImmedAndMakeContained(cmp, cmp->gtOp2);
}
//------------------------------------------------------------------------
// ContainCheckBoundsChk: determine whether any source of a bounds check node should be contained.
//
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node)
{
assert(node->OperIs(GT_BOUNDS_CHECK));
if (!CheckImmedAndMakeContained(node, node->GetIndex()))
{
CheckImmedAndMakeContained(node, node->GetArrayLength());
}
}
#ifdef FEATURE_SIMD
//----------------------------------------------------------------------------------------------
// ContainCheckSIMD: Perform containment analysis for a SIMD intrinsic node.
//
// Arguments:
// simdNode - The SIMD intrinsic node.
//
void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
{
switch (simdNode->GetSIMDIntrinsicId())
{
case SIMDIntrinsicInit:
{
GenTree* op1 = simdNode->Op(1);
if (op1->IsIntegralConst(0))
{
MakeSrcContained(simdNode, op1);
}
break;
}
case SIMDIntrinsicInitArray:
// We have an array and an index, which may be contained.
CheckImmedAndMakeContained(simdNode, simdNode->Op(2));
break;
default:
break;
}
}
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
//----------------------------------------------------------------------------------------------
// ContainCheckHWIntrinsic: Perform containment analysis for a hardware intrinsic node.
//
// Arguments:
// node - The hardware intrinsic node.
//
void Lowering::ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node)
{
const HWIntrinsic intrin(node);
const bool hasImmediateOperand = HWIntrinsicInfo::HasImmediateOperand(intrin.id);
if ((intrin.category == HW_Category_ShiftLeftByImmediate) ||
(intrin.category == HW_Category_ShiftRightByImmediate) ||
((intrin.category == HW_Category_SIMDByIndexedElement) && hasImmediateOperand))
{
switch (intrin.numOperands)
{
case 4:
assert(varTypeIsIntegral(intrin.op4));
if (intrin.op4->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op4);
}
break;
case 3:
assert(varTypeIsIntegral(intrin.op3));
if (intrin.op3->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op3);
}
break;
case 2:
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
}
break;
default:
unreached();
}
}
else if (hasImmediateOperand || HWIntrinsicInfo::SupportsContainment(intrin.id))
{
switch (intrin.id)
{
case NI_AdvSimd_DuplicateSelectedScalarToVector64:
case NI_AdvSimd_DuplicateSelectedScalarToVector128:
case NI_AdvSimd_Extract:
case NI_AdvSimd_InsertScalar:
case NI_AdvSimd_LoadAndInsertScalar:
case NI_AdvSimd_Arm64_DuplicateSelectedScalarToVector128:
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
}
break;
case NI_AdvSimd_ExtractVector64:
case NI_AdvSimd_ExtractVector128:
case NI_AdvSimd_StoreSelectedScalar:
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op3));
if (intrin.op3->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op3);
}
break;
case NI_AdvSimd_Insert:
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
if ((intrin.op2->AsIntCon()->gtIconVal == 0) && intrin.op3->IsCnsFltOrDbl())
{
assert(varTypeIsFloating(intrin.baseType));
const double dataValue = intrin.op3->AsDblCon()->gtDconVal;
if (comp->GetEmitter()->emitIns_valid_imm_for_fmov(dataValue))
{
MakeSrcContained(node, intrin.op3);
}
}
}
break;
case NI_AdvSimd_Arm64_InsertSelectedScalar:
assert(hasImmediateOperand);
assert(intrin.op2->IsCnsIntOrI());
assert(intrin.op4->IsCnsIntOrI());
MakeSrcContained(node, intrin.op2);
MakeSrcContained(node, intrin.op4);
break;
case NI_AdvSimd_CompareEqual:
case NI_AdvSimd_Arm64_CompareEqual:
case NI_AdvSimd_Arm64_CompareEqualScalar:
{
if (intrin.op1->IsVectorZero())
{
GenTree* op1 = intrin.op1;
GenTree* op2 = intrin.op2;
assert(HWIntrinsicInfo::IsCommutative(intrin.id));
MakeSrcContained(node, op1);
// Swap the operands here to make the containment checks in codegen simpler
node->Op(1) = op2;
node->Op(2) = op1;
}
else if (intrin.op2->IsVectorZero())
{
MakeSrcContained(node, intrin.op2);
}
break;
}
case NI_AdvSimd_CompareGreaterThan:
case NI_AdvSimd_CompareGreaterThanOrEqual:
case NI_AdvSimd_Arm64_CompareGreaterThan:
case NI_AdvSimd_Arm64_CompareGreaterThanOrEqual:
case NI_AdvSimd_Arm64_CompareGreaterThanScalar:
case NI_AdvSimd_Arm64_CompareGreaterThanOrEqualScalar:
{
// Containment is not supported for unsigned base types as the corresponding instructions:
// - cmhi
// - cmhs
// require both operands; they do not have a 'compare with zero' form.
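// Signed compares do have such an encoding, e.g. "cmgt v0.4s, v0.4s, #0"
// (operands illustrative), so for signed base types the zero operand can be contained.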
if (intrin.op2->IsVectorZero() && !varTypeIsUnsigned(intrin.baseType))
{
MakeSrcContained(node, intrin.op2);
}
break;
}
case NI_Vector64_CreateScalarUnsafe:
case NI_Vector128_CreateScalarUnsafe:
case NI_AdvSimd_DuplicateToVector64:
case NI_AdvSimd_DuplicateToVector128:
case NI_AdvSimd_Arm64_DuplicateToVector64:
case NI_AdvSimd_Arm64_DuplicateToVector128:
if (IsValidConstForMovImm(node))
{
MakeSrcContained(node, node->Op(1));
}
break;
case NI_Vector64_GetElement:
case NI_Vector128_GetElement:
{
assert(hasImmediateOperand);
assert(varTypeIsIntegral(intrin.op2));
if (intrin.op2->IsCnsIntOrI())
{
MakeSrcContained(node, intrin.op2);
}
if (IsContainableMemoryOp(intrin.op1))
{
MakeSrcContained(node, intrin.op1);
if (intrin.op1->OperIs(GT_IND))
{
intrin.op1->AsIndir()->Addr()->ClearContained();
}
}
break;
}
default:
unreached();
}
}
else if ((intrin.id == NI_AdvSimd_LoadVector128) || (intrin.id == NI_AdvSimd_LoadVector64))
{
assert(intrin.numOperands == 1);
assert(HWIntrinsicInfo::lookupCategory(intrin.id) == HW_Category_MemoryLoad);
GenTree* addr = node->Op(1);
if (TryCreateAddrMode(addr, true, node) && IsSafeToContainMem(node, addr))
{
assert(addr->OperIs(GT_LEA));
MakeSrcContained(node, addr);
}
}
}
#endif // FEATURE_HW_INTRINSICS
#endif // TARGET_ARMARCH
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant that is a power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/jit/morph.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Morph XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "allocacheck.h" // for alloca
// Convert the given node into a call to the specified helper passing
// the given argument list.
//
// Tries to fold constants and also adds an edge for overflow exception
// returns the morphed tree
GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper)
{
GenTree* result;
/* If the operand is a constant, we'll try to fold it */
if (oper->OperIsConst())
{
GenTree* oldTree = tree;
tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
if (tree != oldTree)
{
return fgMorphTree(tree);
}
else if (tree->OperIsConst())
{
return fgMorphConst(tree);
}
// assert that oper is unchanged and that it is still a GT_CAST node
noway_assert(tree->AsCast()->CastOp() == oper);
noway_assert(tree->gtOper == GT_CAST);
}
result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper));
assert(result == tree);
return result;
}
/*****************************************************************************
*
* Convert the given node into a call to the specified helper passing
* the given argument list.
*/
GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs)
{
// The helper call ought to be semantically equivalent to the original node, so preserve its VN.
tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN);
GenTreeCall* call = tree->AsCall();
call->gtCallType = CT_HELPER;
call->gtReturnType = tree->TypeGet();
call->gtCallMethHnd = eeFindHelper(helper);
call->gtCallThisArg = nullptr;
call->gtCallArgs = args;
call->gtCallLateArgs = nullptr;
call->fgArgInfo = nullptr;
call->gtRetClsHnd = nullptr;
call->gtCallMoreFlags = GTF_CALL_M_EMPTY;
call->gtInlineCandidateInfo = nullptr;
call->gtControlExpr = nullptr;
call->gtRetBufArg = nullptr;
#ifdef UNIX_X86_ABI
call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
#if DEBUG
// Helper calls are never candidates.
call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER;
call->callSig = nullptr;
#endif // DEBUG
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if FEATURE_MULTIREG_RET
call->ResetReturnType();
call->ClearOtherRegs();
call->ClearOtherRegFlags();
#ifndef TARGET_64BIT
if (varTypeIsLong(tree))
{
call->InitializeLongReturnType();
}
#endif // !TARGET_64BIT
#endif // FEATURE_MULTIREG_RET
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree->gtFlags |= GTF_CALL;
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
/* Perform the morphing */
if (morphArgs)
{
tree = fgMorphArgs(call);
}
return tree;
}
//------------------------------------------------------------------------
// fgMorphExpandCast: Performs the pre-order (required) morphing for a cast.
//
// Performs a rich variety of pre-order transformations (and some optimizations).
//
// Notably:
// 1. Splits long -> small type casts into long -> int -> small type
// for 32 bit targets. Does the same for float/double -> small type
// casts for all targets.
// 2. Morphs casts not supported by the target directly into helpers.
// These mostly have to do with casts from and to floating point
// types, especially checked ones. Refer to the implementation for
// what specific casts need to be handled - it is a complex matrix.
// 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via
// assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC
// temporary.
// 3. "Pushes down" truncating long -> int casts for some operations:
// CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)).
// The purpose of this is to allow "optNarrowTree" in the post-order
// traversal to fold the tree into a TYP_INT one, which helps 32 bit
// targets (and AMD64 too since 32 bit instructions are more compact).
// TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64.
//
// Arguments:
// tree - the cast tree to morph
//
// Return Value:
// The fully morphed tree, or "nullptr" if it needs further morphing,
// in which case the cast may be transformed into an unchecked one
// and its operand changed (the cast "expanded" into two).
//
GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree)
{
GenTree* oper = tree->CastOp();
if (fgGlobalMorph && (oper->gtOper == GT_ADDR))
{
// Make sure we've checked if 'oper' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast
// morphing code to see that type.
fgMorphImplicitByRefArgs(oper);
}
var_types srcType = genActualType(oper);
var_types dstType = tree->CastToType();
unsigned dstSize = genTypeSize(dstType);
// See if the cast has to be done in two steps. R -> I
if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType))
{
if (srcType == TYP_FLOAT
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
// Arm64: src = float, dst is overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& tree->gtOverflow()
#elif defined(TARGET_AMD64)
// Amd64: src = float, dst = uint64 or overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& (tree->gtOverflow() || (dstType == TYP_ULONG))
#elif defined(TARGET_ARM)
// Arm: src = float, dst = int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType))
#else
// x86: src = float, dst = uint32/int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT))
#endif
)
{
oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE);
}
// Do we need to do it in two steps R -> I -> smallType?
if (dstSize < genTypeSize(TYP_INT))
{
oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->AsCast()->CastOp() = oper;
// We must not mistreat the original cast, which was from a floating point type,
// as from an unsigned type, since we now have a TYP_INT node for the source and
// CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT).
assert(!tree->IsUnsigned());
}
else
{
if (!tree->gtOverflow())
{
// ARM64 and LoongArch64 optimize all non-overflow checking conversions
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
return nullptr;
#else
switch (dstType)
{
case TYP_INT:
return nullptr;
case TYP_UINT:
#if defined(TARGET_ARM) || defined(TARGET_AMD64)
return nullptr;
#else // TARGET_X86
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper);
#endif // TARGET_X86
case TYP_LONG:
#ifdef TARGET_AMD64
// SSE2 has instructions to convert a float/double directly to a long
return nullptr;
#else // !TARGET_AMD64
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper);
#endif // !TARGET_AMD64
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper);
default:
unreached();
}
#endif // TARGET_ARM64 || TARGET_LOONGARCH64
}
else
{
switch (dstType)
{
case TYP_INT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper);
case TYP_UINT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper);
case TYP_LONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper);
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper);
default:
unreached();
}
}
}
}
#ifndef TARGET_64BIT
// The code generation phase (for x86 & ARM32) does not handle casts
// directly from [u]long to anything other than [u]int. Insert an
// intermediate cast to native int.
else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType))
{
oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->AsCast()->CastOp() = oper;
}
#endif //! TARGET_64BIT
#ifdef TARGET_ARMARCH
// AArch, unlike x86/amd64, has instructions that can cast directly from
// all integers (except for longs on AArch32 of course) to floats.
// Because there is no IL instruction conv.r4.un, uint/ulong -> float
// casts are always imported as CAST(float <- CAST(double <- uint/ulong)).
// We can eliminate the redundant intermediate cast as an optimization.
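// For example, "(float)someUInt" is imported as CAST(float <- CAST(double <- uint))
// and is morphed here into the single cast CAST(float <- uint).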
else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST)
#ifdef TARGET_ARM
&& !varTypeIsLong(oper->AsCast()->CastOp())
#endif
)
{
oper->gtType = TYP_FLOAT;
oper->CastToType() = TYP_FLOAT;
return fgMorphTree(oper);
}
#endif // TARGET_ARMARCH
#ifdef TARGET_ARM
// converts long/ulong --> float/double casts into helper calls.
else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType))
{
if (dstType == TYP_FLOAT)
{
// there is only a double helper, so we
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
if (tree->gtFlags & GTF_UNSIGNED)
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
#endif // TARGET_ARM
#ifdef TARGET_AMD64
// Do we have to do two step U4/8 -> R4/8 ?
// Codegen supports the following conversion as one-step operation
// a) Long -> R4/R8
// b) U8 -> R8
//
// The following conversions are performed as two-step operations using above.
// U4 -> R4/8 = U4-> Long -> R4/8
// U8 -> R4 = U8 -> R8 -> R4
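// For example, CAST_UN(double <- uint) is rewritten below as
// CAST(double <- CAST_UN(long <- uint)), which codegen handles directly.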
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
if (dstType == TYP_FLOAT)
{
// Codegen can handle U8 -> R8 conversion.
// U8 -> R4 = U8 -> R8 -> R4
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->CastOp() = oper;
}
}
#endif // TARGET_AMD64
#ifdef TARGET_X86
// Do we have to do two step U4/8 -> R4/8 ?
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->gtFlags &= ~GTF_UNSIGNED;
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
}
else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType))
{
oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
// Since we don't have a Jit Helper that converts to a TYP_FLOAT
// we just use the one that converts to a TYP_DOUBLE
// and then add a cast to TYP_FLOAT
//
if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL))
{
// Fix the return type to be TYP_DOUBLE
//
oper->gtType = TYP_DOUBLE;
// Add a Cast to TYP_FLOAT
//
tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
else
{
return oper;
}
}
#endif // TARGET_X86
else if (varTypeIsGC(srcType) != varTypeIsGC(dstType))
{
// We are casting away GC information. We would like to just
// change the type to int, however this gives the emitter fits because
// it believes the variable is a GC variable at the beginning of the
// instruction group, but it is not turned non-GC by the code generator,
// so we fix this by copying the GC pointer to a non-GC pointer temp.
noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?");
// We generate an assignment to an int and then do the cast from an int. With this we avoid
// the gc problem and we allow casts to bytes, longs, etc...
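// For example, a CAST(long <- byref) becomes COMMA(ASG(tmp, byrefValue), CAST(long <- tmp)),
// where tmp is the non-GC temp grabbed below.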
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC"));
oper->gtType = TYP_I_IMPL;
GenTree* asg = gtNewTempAssign(lclNum, oper);
oper->gtType = srcType;
// do the real cast
GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType);
// Generate the comma tree
oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast);
return fgMorphTree(oper);
}
// Look for narrowing casts ([u]long -> [u]int) and try to push them
// down into the operand before morphing it.
//
// It doesn't matter if this cast is from ulong or long (i.e. if
// GTF_UNSIGNED is set) because the transformation is only applied to
// overflow-insensitive narrowing casts, which always silently truncate.
//
// Note that casts from [u]long to small integer types are handled above.
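// For example, CAST(int <- ADD(long, long)) is rewritten as
// ADD(CAST(int <- long), CAST(int <- long)) so the whole expression can be computed as TYP_INT.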
if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT)))
{
// As a special case, look for overflow-sensitive casts of an AND
// expression, and see if the second operand is a small constant. Since
// the result of an AND is bound by its smaller operand, it may be
// possible to prove that the cast won't overflow, which will in turn
// allow the cast's operand to be transformed.
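// For example, in CAST_OVF(uint <- AND(long, 0xFF)) the AND bounds the value below 2^{32},
// so the overflow check can be removed (and the narrowing cast can then be pushed down).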
if (tree->gtOverflow() && (oper->OperGet() == GT_AND))
{
GenTree* andOp2 = oper->AsOp()->gtOp2;
// Look for a constant less than 2^{32} for a cast to uint, or less
// than 2^{31} for a cast to int.
int maxWidth = (dstType == TYP_UINT) ? 32 : 31;
if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0))
{
tree->ClearOverflow();
tree->SetAllEffectsFlags(oper);
}
}
// Only apply this transformation during global morph,
// when neither the cast node nor the oper node may throw an exception
// based on the upper 32 bits.
//
if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx())
{
// For these operations the lower 32 bits of the result only depends
// upon the lower 32 bits of the operands.
//
bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG);
// For long LSH cast to int, there is a discontinuity in behavior
// when the shift amount is 32 or larger.
//
// CAST(INT, LSH(1LL, 31)) == LSH(1, 31)
// LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31)
//
// CAST(INT, LSH(1LL, 32)) == 0
// LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1
//
// So some extra validation is needed.
//
if (oper->OperIs(GT_LSH))
{
GenTree* shiftAmount = oper->AsOp()->gtOp2;
// Expose constant value for shift, if possible, to maximize the number
// of cases we can handle.
shiftAmount = gtFoldExpr(shiftAmount);
oper->AsOp()->gtOp2 = shiftAmount;
#if DEBUG
// We may remorph the shift amount tree again later, so clear any morphed flag.
shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
if (shiftAmount->IsIntegralConst())
{
const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue();
if ((shiftAmountValue >= 64) || (shiftAmountValue < 0))
{
// Shift amount is large enough or negative so result is undefined.
// Don't try to optimize.
assert(!canPushCast);
}
else if (shiftAmountValue >= 32)
{
// We know that we have a narrowing cast ([u]long -> [u]int)
// and that we are casting to a 32-bit value, which will result in zero.
//
// Check to see if we have any side-effects that we must keep
//
if ((tree->gtFlags & GTF_ALL_EFFECT) == 0)
{
// Result of the shift is zero.
DEBUG_DESTROY_NODE(tree);
GenTree* zero = gtNewZeroConNode(TYP_INT);
return fgMorphTree(zero);
}
else // We do have a side-effect
{
// We could create a GT_COMMA node here to keep the side-effect and return a zero
// Instead we just don't try to optimize this case.
canPushCast = false;
}
}
else
{
// Shift amount is positive and small enough that we can push the cast through.
canPushCast = true;
}
}
else
{
// Shift amount is unknown. We can't optimize this case.
assert(!canPushCast);
}
}
if (canPushCast)
{
DEBUG_DESTROY_NODE(tree);
// Insert narrowing casts for op1 and op2.
oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType);
if (oper->AsOp()->gtOp2 != nullptr)
{
oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType);
}
// Clear the GT_MUL_64RSLT if it is set.
if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT))
{
oper->gtFlags &= ~GTF_MUL_64RSLT;
}
// The operation now produces a 32-bit result.
oper->gtType = TYP_INT;
// Remorph the new tree as the casts that we added may be folded away.
return fgMorphTree(oper);
}
}
}
return nullptr;
}
#ifdef DEBUG
const char* getNonStandardArgKindName(NonStandardArgKind kind)
{
switch (kind)
{
case NonStandardArgKind::None:
return "None";
case NonStandardArgKind::PInvokeFrame:
return "PInvokeFrame";
case NonStandardArgKind::PInvokeTarget:
return "PInvokeTarget";
case NonStandardArgKind::PInvokeCookie:
return "PInvokeCookie";
case NonStandardArgKind::WrapperDelegateCell:
return "WrapperDelegateCell";
case NonStandardArgKind::ShiftLow:
return "ShiftLow";
case NonStandardArgKind::ShiftHigh:
return "ShiftHigh";
case NonStandardArgKind::FixedRetBuffer:
return "FixedRetBuffer";
case NonStandardArgKind::VirtualStubCell:
return "VirtualStubCell";
case NonStandardArgKind::R2RIndirectionCell:
return "R2RIndirectionCell";
case NonStandardArgKind::ValidateIndirectCallTarget:
return "ValidateIndirectCallTarget";
default:
unreached();
}
}
void fgArgTabEntry::Dump() const
{
printf("fgArgTabEntry[arg %u", argNum);
printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet()));
printf(" %s", varTypeName(argType));
printf(" (%s)", passedByRef ? "By ref" : "By value");
if (GetRegNum() != REG_STK)
{
printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s");
for (unsigned i = 0; i < numRegs; i++)
{
printf(" %s", getRegName(regNums[i]));
}
}
if (GetStackByteSize() > 0)
{
#if defined(DEBUG_ARG_SLOTS)
printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset);
#else
printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset);
#endif
}
printf(", byteAlignment=%u", m_byteAlignment);
if (isLateArg())
{
printf(", lateArgInx=%u", GetLateArgInx());
}
if (IsSplit())
{
printf(", isSplit");
}
if (needTmp)
{
printf(", tmpNum=V%02u", tmpNum);
}
if (needPlace)
{
printf(", needPlace");
}
if (isTmp)
{
printf(", isTmp");
}
if (processed)
{
printf(", processed");
}
if (IsHfaRegArg())
{
printf(", isHfa(%s)", varTypeName(GetHfaType()));
}
if (isBackFilled)
{
printf(", isBackFilled");
}
if (nonStandardArgKind != NonStandardArgKind::None)
{
printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind));
}
if (isStruct)
{
printf(", isStruct");
}
printf("]\n");
}
#endif
fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs)
{
compiler = comp;
callTree = call;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = 0;
#if defined(UNIX_X86_ABI)
alignmentDone = false;
stkSizeBytes = 0;
padStkAlign = 0;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = 0;
#endif
argTableSize = numArgs; // the allocated table size
hasRegArgs = false;
hasStackArgs = false;
argsComplete = false;
argsSorted = false;
needsTemps = false;
if (argTableSize == 0)
{
argTable = nullptr;
}
else
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
}
}
/*****************************************************************************
*
* fgArgInfo Copy Constructor
*
* This method needs to act like a copy constructor for fgArgInfo.
* The newCall needs to have its fgArgInfo initialized such that
* we have newCall that is an exact copy of the oldCall.
* We have to take care since the argument information
* in the argTable contains pointers that must point to the
* new arguments and not the old arguments.
*/
fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall)
{
fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo;
compiler = oldArgInfo->compiler;
callTree = newCall;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = oldArgInfo->stkLevel;
#if defined(UNIX_X86_ABI)
alignmentDone = oldArgInfo->alignmentDone;
stkSizeBytes = oldArgInfo->stkSizeBytes;
padStkAlign = oldArgInfo->padStkAlign;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = oldArgInfo->outArgSize;
#endif
argTableSize = oldArgInfo->argTableSize;
argsComplete = false;
argTable = nullptr;
assert(oldArgInfo->argsComplete);
if (argTableSize > 0)
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
// Copy the old arg entries
for (unsigned i = 0; i < argTableSize; i++)
{
argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]);
}
// The copied arg entries contain pointers to old uses, they need
// to be updated to point to new uses.
if (newCall->gtCallThisArg != nullptr)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldCall->gtCallThisArg)
{
argTable[i]->use = newCall->gtCallThisArg;
break;
}
}
}
GenTreeCall::UseIterator newUse = newCall->Args().begin();
GenTreeCall::UseIterator newUseEnd = newCall->Args().end();
GenTreeCall::UseIterator oldUse = oldCall->Args().begin();
GenTreeCall::UseIterator oldUseEnd = newCall->Args().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldUse.GetUse())
{
argTable[i]->use = newUse.GetUse();
break;
}
}
}
newUse = newCall->LateArgs().begin();
newUseEnd = newCall->LateArgs().end();
oldUse = oldCall->LateArgs().begin();
oldUseEnd = newCall->LateArgs().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->lateUse == oldUse.GetUse())
{
argTable[i]->lateUse = newUse.GetUse();
break;
}
}
}
}
argCount = oldArgInfo->argCount;
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;)
nextStackByteOffset = oldArgInfo->nextStackByteOffset;
hasRegArgs = oldArgInfo->hasRegArgs;
hasStackArgs = oldArgInfo->hasStackArgs;
argsComplete = true;
argsSorted = true;
}
void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry)
{
assert(argCount < argTableSize);
argTable[argCount] = curArgTabEntry;
argCount++;
}
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
// Any additional register numbers are set by the caller.
// This is primarily because on ARM we don't yet know if it
// will be split or if it is a double HFA, so the number of registers
// may actually be less.
curArgTabEntry->setRegNum(0, regNum);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
curArgTabEntry->numRegs = numRegs;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->slotNum = 0;
curArgTabEntry->numSlots = 0;
#endif
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(0);
#ifdef TARGET_LOONGARCH64
curArgTabEntry->structFloatFieldType[0] = TYP_UNDEF;
curArgTabEntry->structFloatFieldType[1] = TYP_UNDEF;
#endif
hasRegArgs = true;
if (argCount >= argTableSize)
{
fgArgTabEntry** oldTable = argTable;
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1];
memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*));
argTableSize++;
}
AddArg(curArgTabEntry);
return curArgTabEntry;
}
#if defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr)
{
fgArgTabEntry* curArgTabEntry =
AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg);
assert(curArgTabEntry != nullptr);
curArgTabEntry->isStruct = isStruct; // is this a struct arg
curArgTabEntry->structIntRegs = structIntRegs;
curArgTabEntry->structFloatRegs = structFloatRegs;
INDEBUG(curArgTabEntry->checkIsStruct();)
assert(numRegs <= 2);
if (numRegs == 2)
{
curArgTabEntry->setRegNum(1, otherRegNum);
}
if (isStruct && structDescPtr != nullptr)
{
curArgTabEntry->structDesc.CopyFrom(*structDescPtr);
}
return curArgTabEntry;
}
#endif // defined(UNIX_AMD64_ABI)
#if defined(TARGET_LOONGARCH64)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa, /* unused */
bool isVararg,
const regNumber otherRegNum)
{
fgArgTabEntry* curArgTabEntry =
AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, false, isVararg);
assert(curArgTabEntry != nullptr);
INDEBUG(curArgTabEntry->checkIsStruct();)
assert(numRegs <= 2);
if (numRegs == 2)
{
curArgTabEntry->setRegNum(1, otherRegNum);
}
return curArgTabEntry;
}
#endif // defined(TARGET_LOONGARCH64)
fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE);
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment);
DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum);
curArgTabEntry->setRegNum(0, REG_STK);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->numSlots = numSlots;
curArgTabEntry->slotNum = nextSlotNum;
#endif
curArgTabEntry->numRegs = 0;
#if defined(UNIX_AMD64_ABI)
curArgTabEntry->structIntRegs = 0;
curArgTabEntry->structFloatRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(nextStackByteOffset);
hasStackArgs = true;
AddArg(curArgTabEntry);
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
nextStackByteOffset += curArgTabEntry->GetByteSize();
return curArgTabEntry;
}
void fgArgInfo::RemorphReset()
{
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// UpdateRegArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be at least partially passed in registers.
//
void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
assert(curArgTabEntry->numRegs != 0);
assert(curArgTabEntry->use->GetNode() == node);
}
//------------------------------------------------------------------------
// UpdateStkArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be passed on the stack.
//
void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
noway_assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit());
assert(curArgTabEntry->use->GetNode() == node);
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE);
assert(curArgTabEntry->slotNum == nextSlotNum);
nextSlotNum += curArgTabEntry->numSlots;
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment());
assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset);
nextStackByteOffset += curArgTabEntry->GetStackByteSize();
}
void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots)
{
fgArgTabEntry* curArgTabEntry = nullptr;
assert(argNum < argCount);
for (unsigned inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
if (curArgTabEntry->argNum == argNum)
{
break;
}
}
assert(numRegs > 0);
assert(numSlots > 0);
if (argsComplete)
{
assert(curArgTabEntry->IsSplit() == true);
assert(curArgTabEntry->numRegs == numRegs);
DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);)
assert(hasStackArgs == true);
}
else
{
curArgTabEntry->SetSplit(true);
curArgTabEntry->numRegs = numRegs;
DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;)
curArgTabEntry->SetByteOffset(0);
hasStackArgs = true;
}
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
// TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size.
nextStackByteOffset += numSlots * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// EvalToTmp: Replace the node in the given fgArgTabEntry with a temp
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry for the argument
// tmpNum - the varNum for the temp
// newNode - the assignment of the argument value to the temp
//
// Notes:
// Although the name of this method is EvalToTmp, it doesn't actually create
// the temp or the copy.
//
void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode)
{
assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert(curArgTabEntry->use->GetNode() == newNode);
assert(curArgTabEntry->GetNode() == newNode);
curArgTabEntry->tmpNum = tmpNum;
curArgTabEntry->isTmp = true;
}
void fgArgInfo::ArgsComplete()
{
bool hasStructRegArg = false;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
if (curArgTabEntry->GetRegNum() == REG_STK)
{
assert(hasStackArgs == true);
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we use push instructions to pass arguments:
// The non-register arguments are evaluated and pushed in order
// and they are never evaluated into temps
//
continue;
#endif
}
#if FEATURE_ARG_SPLIT
else if (curArgTabEntry->IsSplit())
{
hasStructRegArg = true;
assert(hasStackArgs == true);
}
#endif // FEATURE_ARG_SPLIT
else // we have a register argument, next we look for a struct type.
{
if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct))
{
hasStructRegArg = true;
}
}
/* If the argument tree contains an assignment (GTF_ASG) then the argument and
and every earlier argument (except constants) must be evaluated into temps
since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a"
-> when we see the second arg "a=5"
we know the first two arguments "a, a=5" have to be evaluated into temps
For the case of an assignment, we only know that there exist some assignment someplace
in the tree. We don't know what is being assigned so we are very conservative here
and assume that any local variable could have been assigned.
*/
if (argx->gtFlags & GTF_ASG)
{
// If this is not the only argument, or it's a copyblk, or it already evaluates the expression to
// a tmp, then we need a temp in the late arg list.
if ((argCount > 1) || argx->OperIsCopyBlkOp()
#ifdef FEATURE_FIXED_OUT_ARGS
|| curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property
// that we only have late non-register args when that feature is on.
#endif // FEATURE_FIXED_OUT_ARGS
)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// For all previous arguments, unless they are a simple constant
// we require that they be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
if (!prevArgTabEntry->GetNode()->IsInvariant())
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0);
#if FEATURE_FIXED_OUT_ARGS
// Like calls, if this argument has a tree that will do an inline throw,
// a call to a jit helper, then we need to treat it like a call (but only
// if there are/were any stack args).
// This means unnesting, sorting, etc. Technically this is overly
// conservative, but I want to avoid as much special-case debug-only code
// as possible, so leveraging the GTF_CALL flag is the easiest.
//
if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode &&
(compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT))
{
for (unsigned otherInx = 0; otherInx < argCount; otherInx++)
{
if (otherInx == curInx)
{
continue;
}
if (argTable[otherInx]->GetRegNum() == REG_STK)
{
treatLikeCall = true;
break;
}
}
}
#endif // FEATURE_FIXED_OUT_ARGS
/* If it contains a call (GTF_CALL) then itself and everything before the call
with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT
has to be kept in the right order since we will move the call to the first position)
For calls we don't have to be quite as conservative as we are with an assignment
since the call won't be modifying any non-address taken LclVars.
*/
if (treatLikeCall)
{
if (argCount > 1) // If this is not the only argument
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL))
{
// Spill all arguments that are floating point calls
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// All previous arguments may need to be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
// For all previous arguments, if they have any GTF_ALL_EFFECT
// we require that they be evaluated into a temp
if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0)
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
#if FEATURE_FIXED_OUT_ARGS
// Or, if they are stored into the FIXED_OUT_ARG area
// we require that they be moved to the gtCallLateArgs
// and replaced with a placeholder node
else if (prevArgTabEntry->GetRegNum() == REG_STK)
{
prevArgTabEntry->needPlace = true;
}
#if FEATURE_ARG_SPLIT
else if (prevArgTabEntry->IsSplit())
{
prevArgTabEntry->needPlace = true;
}
#endif // FEATURE_ARG_SPLIT
#endif
}
}
#if FEATURE_MULTIREG_ARGS
// For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST
// with multiple indirections, so here we consider spilling it into a tmp LclVar.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
bool isMultiRegArg =
(curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1);
#else
bool isMultiRegArg = (curArgTabEntry->numRegs > 1);
#endif
if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false))
{
if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0))
{
// Spill multireg struct arguments that have Assignments or Calls embedded in them
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else
{
// We call gtPrepareCost to measure the cost of evaluating this tree
compiler->gtPrepareCost(argx);
if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX)))
{
// Spill multireg struct arguments that are expensive to evaluate twice
curArgTabEntry->needTmp = true;
needsTemps = true;
}
#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet()))
{
// SIMD types do not need the optimization below due to their sizes
if (argx->OperIsSimdOrHWintrinsic() ||
(argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) &&
argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic()))
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
#endif
#ifndef TARGET_ARM
// TODO-Arm: This optimization is not implemented for ARM32
// so we skip this for ARM32 until it is ported to use RyuJIT backend
//
else if (argx->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argx->AsObj();
unsigned structSize = argObj->GetLayout()->GetSize();
switch (structSize)
{
case 3:
case 5:
case 6:
case 7:
// If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes
//
if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar?
{
// If we don't have a LclVar we need to read exactly 3, 5, 6 or 7 bytes.
// For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
}
break;
case 11:
case 13:
case 14:
case 15:
// Spill any GT_OBJ multireg structs that are difficult to extract
//
// When we have a GT_OBJ of a struct with the above sizes we would need
// to use 3 or 4 load instructions to load the exact size of this struct.
// Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence
// will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp.
// Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing
// the argument.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
break;
default:
break;
}
}
#endif // !TARGET_ARM
}
}
#endif // FEATURE_MULTIREG_ARGS
}
// We only care because we can't spill structs and qmarks involve a lot of spilling, but
// if we don't have qmarks, then it doesn't matter.
// So check for Qmarks globally once here, instead of inside the loop.
//
const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed);
#if FEATURE_FIXED_OUT_ARGS
// For Arm/x64 we only care because we can't reorder a register
// argument that uses GT_LCLHEAP. This is an optimization to
// save a check inside the below loop.
//
const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed);
#else
const bool hasStackArgsWeCareAbout = hasStackArgs;
#endif // FEATURE_FIXED_OUT_ARGS
// If we have any stack args we have to force the evaluation
// of any arguments passed in registers that might throw an exception
//
// Technically we are only required to handle the following two cases:
// a GT_IND with GTF_IND_RNGCHK (only on x86) or
// a GT_LCLHEAP node that allocates stuff on the stack
//
if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout)
{
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
// Examine the register args that are currently not marked needTmp
//
if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK))
{
if (hasStackArgsWeCareAbout)
{
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we previously recorded a stack depth of zero when
// morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag
// Thus we can not reorder the argument after any stack based argument
// (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to
// check for it explicitly.)
//
if (argx->gtFlags & GTF_EXCEPT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
#else
// For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP
//
if (argx->gtFlags & GTF_EXCEPT)
{
assert(compiler->compLocallocUsed);
// Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
#endif
}
if (hasStructRegArgWeCareAbout)
{
// Returns true if a GT_QMARK node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
}
}
}
// When CFG is enabled and this is a delegate call or vtable call we must
// compute the call target before all late args. However this will
// effectively null-check 'this', which should happen only after all
// arguments are evaluated. Thus we must evaluate all args with side
// effects to a temp.
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke()))
{
// Always evaluate 'this' to temp.
argTable[0]->needTmp = true;
needsTemps = true;
for (unsigned curInx = 1; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
GenTree* arg = curArgTabEntry->GetNode();
if ((arg->gtFlags & GTF_ALL_EFFECT) != 0)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
argsComplete = true;
}
void fgArgInfo::SortArgs()
{
assert(argsComplete == true);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nSorting the arguments:\n");
}
#endif
/* Shuffle the arguments around before we build the gtCallLateArgs list.
The idea is to move all "simple" arguments like constants and local vars
to the end of the table, and move the complex arguments towards the beginning
of the table. This will help prevent registers from being spilled by
allowing us to evaluate the more complex arguments before the simpler arguments.
The argTable ends up looking like:
+------------------------------------+ <--- argTable[argCount - 1]
| constants |
+------------------------------------+
| local var / local field |
+------------------------------------+
| remaining arguments sorted by cost |
+------------------------------------+
| temps (argTable[].needTmp = true) |
+------------------------------------+
| args with calls (GTF_CALL) |
+------------------------------------+ <--- argTable[0]
*/
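//
// For example, for a call such as foo(helperCall(), x + y, lclVar, 42), the sorted
// table (reading argTable[0] through argTable[3]) would typically be:
// helperCall() (contains a call), x + y (ordered by cost), lclVar, 42.
// (The names here are purely illustrative.)
//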
/* Set the beginning and end for the new argument table */
unsigned curInx;
int regCount = 0;
unsigned begTab = 0;
unsigned endTab = argCount - 1;
unsigned argsRemaining = argCount;
// First take care of arguments that are constants.
// [We use a backward iterator pattern]
//
curInx = argCount;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
regCount++;
}
assert(curArgTabEntry->lateUse == nullptr);
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put constants at the end of the table
//
if (argx->gtOper == GT_CNS_INT)
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > 0);
if (argsRemaining > 0)
{
// Next take care of arguments that are calls.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put calls at the beginning of the table
//
if (argx->gtFlags & GTF_CALL)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of arguments that are temps.
// These temps come before the arguments that are
// ordinary local vars or local fields
// since this will give them a better chance to become
// enregistered into their actual argument register.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
if (curArgTabEntry->needTmp)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of local var and local field arguments.
// These are moved towards the end of the argument evaluation.
// [We use a backward iterator pattern]
//
curInx = endTab + 1;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD))
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > begTab);
}
// Finally, take care of all the remaining arguments.
// Note that we fill in one arg at a time using a while loop.
bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop
while (argsRemaining > 0)
{
/* Find the most expensive arg remaining and evaluate it next */
fgArgTabEntry* expensiveArgTabEntry = nullptr;
unsigned expensiveArg = UINT_MAX;
unsigned expensiveArgCost = 0;
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// We should have already handled these kinds of args
assert(argx->gtOper != GT_LCL_VAR);
assert(argx->gtOper != GT_LCL_FLD);
assert(argx->gtOper != GT_CNS_INT);
// This arg should either have no persistent side effects or be the last one in our table
// assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1)));
if (argsRemaining == 1)
{
// This is the last arg to place
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
assert(begTab == endTab);
break;
}
else
{
if (!costsPrepared)
{
/* We call gtPrepareCost to measure the cost of evaluating this tree */
compiler->gtPrepareCost(argx);
}
if (argx->GetCostEx() > expensiveArgCost)
{
// Remember this arg as the most expensive one that we have yet seen
expensiveArgCost = argx->GetCostEx();
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
}
}
}
}
noway_assert(expensiveArg != UINT_MAX);
// put the most expensive arg towards the beginning of the table
expensiveArgTabEntry->processed = true;
// place expensiveArgTabEntry at the begTab position by performing a swap
//
if (expensiveArg != begTab)
{
argTable[expensiveArg] = argTable[begTab];
argTable[begTab] = expensiveArgTabEntry;
}
begTab++;
argsRemaining--;
costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop
}
// The table should now be completely filled and thus begTab should now be adjacent to endTab
// and argsRemaining should be zero
assert(begTab == (endTab + 1));
assert(argsRemaining == 0);
argsSorted = true;
}
#ifdef DEBUG
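//------------------------------------------------------------------------------
// fgArgInfo::Dump: Dump every entry in the argument table (debug only).
//
// Arguments:
//    compiler - the compiler instance (currently unused by this method)
//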
void fgArgInfo::Dump(Compiler* compiler) const
{
for (unsigned curInx = 0; curInx < ArgCount(); curInx++)
{
fgArgTabEntry* curArgEntry = ArgTable()[curInx];
curArgEntry->Dump();
}
}
#endif
//------------------------------------------------------------------------------
// fgMakeTmpArgNode : This function creates a tmp var only if needed.
// We need this to be done in order to enforce ordering
// of the evaluation of arguments.
//
// Arguments:
// curArgTabEntry - the argument table entry referencing the temp (tmpNum) to copy
//
// Return Value:
// the newly created temp var tree.
GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry)
{
unsigned tmpVarNum = curArgTabEntry->tmpNum;
LclVarDsc* varDsc = lvaGetDesc(tmpVarNum);
assert(varDsc->lvIsTemp);
var_types type = varDsc->TypeGet();
// Create a copy of the temp to go into the late argument list
GenTree* arg = gtNewLclvNode(tmpVarNum, type);
GenTree* addrNode = nullptr;
if (varTypeIsStruct(type))
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) || defined(TARGET_LOONGARCH64)
// Can this type be passed as a primitive type?
// If so, the following call will return the corresponding primitive type.
// Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type.
bool passedAsPrimitive = false;
if (curArgTabEntry->TryPassAsPrimitive())
{
CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd();
var_types structBaseType =
getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg());
if (structBaseType != TYP_UNKNOWN)
{
passedAsPrimitive = true;
#if defined(UNIX_AMD64_ABI)
// TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry,
// and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take
// a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again.
//
if (genIsValidFloatReg(curArgTabEntry->GetRegNum()))
{
if (structBaseType == TYP_INT)
{
structBaseType = TYP_FLOAT;
}
else
{
assert(structBaseType == TYP_LONG);
structBaseType = TYP_DOUBLE;
}
}
#endif
type = structBaseType;
}
}
// If it is passed in registers, don't get the address of the var. Make it a
// field instead. It will be loaded in registers with putarg_reg tree in lower.
if (passedAsPrimitive)
{
arg->ChangeOper(GT_LCL_FLD);
arg->gtType = type;
lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
}
else
{
var_types addrType = TYP_BYREF;
arg = gtNewOperNode(GT_ADDR, addrType, arg);
lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS));
addrNode = arg;
#if FEATURE_MULTIREG_ARGS
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
assert(varTypeIsStruct(type));
if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg()))
{
// We will create a GT_OBJ for the argument below.
// This will be passed by value in two registers.
assert(addrNode != nullptr);
// Create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
}
#else
// Always create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
#endif // !(TARGET_ARM64 || TARGET_LOONGARCH64)
#endif // FEATURE_MULTIREG_ARGS
}
#else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM or TARGET_LOONGARCH64)
// On other targets, we pass the struct by value.
assert(varTypeIsStruct(type));
addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
// Get a new Obj node temp to use it as a call argument.
// gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode);
#endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM or TARGET_LOONGARCH64)
} // (varTypeIsStruct(type))
if (addrNode != nullptr)
{
assert(addrNode->gtOper == GT_ADDR);
// the child of a GT_ADDR is required to have this flag set
addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE;
}
return arg;
}
//------------------------------------------------------------------------------
// EvalArgsToTemps : Create temp assignments and populate the LateArgs list.
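//
// Notes:
//    Walks the sorted argument table. For each argument marked needTmp, an assignment
//    to a temp (the "setup" node) remains in the early argument list and a use of that
//    temp is appended to gtCallLateArgs. Arguments that do not need a temp but must
//    still be deferred (register args, or args with needPlace set) are moved to
//    gtCallLateArgs and replaced in gtCallArgs by a GT_ARGPLACE placeholder.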
void fgArgInfo::EvalArgsToTemps()
{
assert(argsSorted);
unsigned regArgInx = 0;
// Now go through the argument table and perform the necessary evaluation into temps
GenTreeCall::Use* tmpRegArgNext = nullptr;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry->lateUse == nullptr);
GenTree* argx = curArgTabEntry->GetNode();
GenTree* setupArg = nullptr;
GenTree* defArg;
#if !FEATURE_FIXED_OUT_ARGS
// Only ever set for FEATURE_FIXED_OUT_ARGS
assert(curArgTabEntry->needPlace == false);
// On x86 and other archs that use push instructions to pass arguments:
// Only the register arguments need to be replaced with placeholder nodes.
// Stacked arguments are evaluated and pushed (or stored into the stack) in order.
//
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
#endif
if (curArgTabEntry->needTmp)
{
if (curArgTabEntry->isTmp)
{
// Create a copy of the temp to go into the late argument list
defArg = compiler->fgMakeTmpArgNode(curArgTabEntry);
// mark the original node as a late argument
argx->gtFlags |= GTF_LATE_ARG;
}
else
{
// Create a temp assignment for the argument
// Put the temp in the gtCallLateArgs list
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Argument with 'side effect'...\n");
compiler->gtDispTree(argx);
}
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
noway_assert(argx->gtType != TYP_STRUCT);
#endif
unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect"));
if (argx->gtOper == GT_MKREFANY)
{
// For GT_MKREFANY, typically the actual struct copying does
// not have any side-effects and can be delayed. So instead
// of using a temp for the whole struct, we can just use a temp
// for the operand that has a side-effect.
GenTree* operand;
if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp1;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp2;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
}
if (setupArg != nullptr)
{
// Now keep the mkrefany for the late argument list
defArg = argx;
// Clear the side-effect flags because now both op1 and op2 have no side-effects
defArg->gtFlags &= ~GTF_ALL_EFFECT;
}
else
{
setupArg = compiler->gtNewTempAssign(tmpVarNum, argx);
LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum);
var_types lclVarType = genActualType(argx->gtType);
var_types scalarType = TYP_UNKNOWN;
if (setupArg->OperIsCopyBlkOp())
{
setupArg = compiler->fgMorphCopyBlock(setupArg);
#if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
#if defined(TARGET_LOONGARCH64)
// On LoongArch64, "getPrimitiveTypeForStruct" will incorrectly return "TYP_LONG"
// for "struct { float, float }", and retyping to a primitive here will cause the
// multi-reg morphing to not kick in (the struct in question needs to be passed in
// two FP registers).
// TODO-LoongArch64: fix "getPrimitiveTypeForStruct" or use the ABI information in
// the arg entry instead of calling it here.
if ((lclVarType == TYP_STRUCT) && (curArgTabEntry->numRegs == 1))
#else
if (lclVarType == TYP_STRUCT)
#endif
{
// This scalar LclVar widening step is only performed for ARM architectures.
//
CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum);
unsigned structSize = varDsc->lvExactSize;
scalarType =
compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg());
}
#endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
}
// scalarType can be set to a wider type for ARM or unix amd64 architectures: (3 => 4) or (5,6,7 =>
// 8)
if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType))
{
// Create a GT_LCL_FLD using the wider type to go to the late argument list
defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0);
}
else
{
// Create a copy of the temp to go to the late argument list
defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType);
}
curArgTabEntry->isTmp = true;
curArgTabEntry->tmpNum = tmpVarNum;
#ifdef TARGET_ARM
// Previously we might have thought the local was promoted, and thus the 'COPYBLK'
// might have left holes in the used registers (see
// fgAddSkippedRegsInPromotedStructArg).
// Too bad we're not that smart for these intermediate temps...
if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1))
{
regNumber argReg = curArgTabEntry->GetRegNum();
regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum());
for (unsigned i = 1; i < curArgTabEntry->numRegs; i++)
{
argReg = genRegArgNext(argReg);
allUsedRegs |= genRegMask(argReg);
}
}
#endif // TARGET_ARM
}
/* mark the assignment as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n Evaluate to a temp:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
}
else // curArgTabEntry->needTmp == false
{
// On x86 -
// Only register args are replaced with placeholder nodes
// and the stack based arguments are evaluated and pushed in order.
//
// On Arm/x64 - When needTmp is false and needPlace is false,
// the non-register arguments are evaluated and stored in order.
// When needPlace is true we have a nested call that comes after
// this argument so we have to replace it in the gtCallArgs list
// (the initial argument evaluation list) with a placeholder.
//
if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false))
{
continue;
}
/* No temp needed - move the whole node to the gtCallLateArgs list */
/* The argument is deferred and put in the late argument list */
defArg = argx;
// Create a placeholder node to put in its place in gtCallLateArgs.
// For a struct type we also need to record the class handle of the arg.
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
// All structs are either passed (and retyped) as integral types, OR they
// are passed by reference.
noway_assert(argx->gtType != TYP_STRUCT);
#else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)
if (defArg->TypeGet() == TYP_STRUCT)
{
clsHnd = compiler->gtGetStructHandleIfPresent(defArg);
noway_assert(clsHnd != NO_CLASS_HANDLE);
}
#endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI))
setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd);
/* mark the placeholder node as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
if (curArgTabEntry->GetRegNum() == REG_STK)
{
printf("Deferred stack argument :\n");
}
else
{
printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum()));
}
compiler->gtDispTree(argx);
printf("Replaced with placeholder node:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
if (setupArg != nullptr)
{
noway_assert(curArgTabEntry->use->GetNode() == argx);
curArgTabEntry->use->SetNode(setupArg);
}
/* deferred arg goes into the late argument list */
if (tmpRegArgNext == nullptr)
{
tmpRegArgNext = compiler->gtNewCallArgs(defArg);
callTree->AsCall()->gtCallLateArgs = tmpRegArgNext;
}
else
{
noway_assert(tmpRegArgNext->GetNode() != nullptr);
tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg));
tmpRegArgNext = tmpRegArgNext->GetNext();
}
curArgTabEntry->lateUse = tmpRegArgNext;
curArgTabEntry->SetLateArgInx(regArgInx++);
if ((setupArg != nullptr) && setupArg->OperIs(GT_ARGPLACE) && (callTree->gtRetBufArg == curArgTabEntry->use))
{
callTree->SetLclRetBufArg(tmpRegArgNext);
}
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nShuffled argument table: ");
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
printf("%s ", getRegName(curArgTabEntry->GetRegNum()));
}
}
printf("\n");
}
#endif
}
//------------------------------------------------------------------------------
// fgMakeMultiUse : If the node is an unaliased local or constant clone it,
// otherwise insert a comma form temp
//
// Arguments:
// pOp - a pointer to the child node we will be replacing with the comma expression that
// evaluates it to a temp and returns the result
//
// Return Value:
// A fresh node that can substitute for an extra use of the tree: a clone for invariant
// or unaliased local trees, otherwise a GT_LCL_VAR referencing a new temp
//
// Notes:
// Caller must ensure that if the node is an unaliased local, the second use this
// creates will be evaluated before the local can be reassigned.
//
// Can be safely called in morph preorder, before GTF_GLOB_REF is reliable.
//
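//    For example, calling fgMakeMultiUse(&node->gtOp2) on an operand that must be
//    evaluated twice either clones the operand (when it is invariant or an unaliased
//    local) or rewrites it in place as COMMA(ASG(tmpN, operand), LCL_VAR tmpN) and
//    returns a separate LCL_VAR(tmpN) for the additional use.
//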
GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
{
GenTree* const tree = *pOp;
if (tree->IsInvariant())
{
return gtClone(tree);
}
else if (tree->IsLocal())
{
// Can't rely on GTF_GLOB_REF here.
//
if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed())
{
return gtClone(tree);
}
}
return fgInsertCommaFormTemp(pOp);
}
//------------------------------------------------------------------------------
// fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree,
// and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl)
//
// Arguments:
// ppTree - a pointer to the child node we will be replacing with the comma expression that
// evaluates ppTree to a temp and returns the result
//
// structType - value type handle if the temp created is of TYP_STRUCT.
//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
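//    For example, given *ppTree == IND(addr), the tree is rewritten to
//    COMMA(ASG(tmpN, IND(addr)), LCL_VAR tmpN) and a separate LCL_VAR(tmpN) node
//    is returned for the caller to use.
//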
GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
{
GenTree* subTree = *ppTree;
unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable"));
if (varTypeIsStruct(subTree))
{
assert(structType != nullptr);
lvaSetStruct(lclNum, structType, false);
}
// If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree.
// The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for
// setting type of lcl vars created.
GenTree* asg = gtNewTempAssign(lclNum, subTree);
GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load);
*ppTree = comma;
return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
}
//------------------------------------------------------------------------
// fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg
//
// Arguments:
// callNode - the call for which we are generating the fgArgInfo
//
// Return Value:
// None
//
// Notes:
// This method is idempotent in that it checks whether the fgArgInfo has already been
// constructed, and just returns.
// This method only computes the arg table and arg entries for the call (the fgArgInfo),
// and makes no modification of the args themselves.
//
// The IR for the call args can change for calls with non-standard arguments: some non-standard
// arguments add new call argument IR nodes.
//
void Compiler::fgInitArgInfo(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
unsigned argIndex = 0;
unsigned intArgRegNum = 0;
unsigned fltArgRegNum = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool callHasRetBuffArg = call->HasRetBufArg();
bool callIsVararg = call->IsVarargs();
#ifdef TARGET_ARM
regMaskTP argSkippedRegMask = RBM_NONE;
regMaskTP fltArgSkippedRegMask = RBM_NONE;
#endif // TARGET_ARM
#if defined(TARGET_X86)
unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated
#else
const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number
#endif
if (call->fgArgInfo != nullptr)
{
// We've already initialized and set the fgArgInfo.
return;
}
JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
// At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined.
assert(call->gtCallLateArgs == nullptr);
if (TargetOS::IsUnix && callIsVararg)
{
// Currently native varargs is not implemented on non-Windows targets.
//
// Note that some targets like Arm64 Unix should not need much work as
// the ABI is the same. While other targets may only need small changes
// such as amd64 Unix, which just expects RAX to pass numFPArguments.
NYI("Morphing Vararg call not yet implemented on non Windows targets.");
}
// Data structure for keeping track of non-standard args. Non-standard args are those that are not passed
// following the normal calling convention or in the normal argument registers. We either mark existing
// arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the
// non-standard arguments into the argument list, below.
class NonStandardArgs
{
struct NonStandardArg
{
GenTree* node; // The tree node representing this non-standard argument.
// Note that this must be updated if the tree node changes due to morphing!
regNumber reg; // The register to be assigned to this non-standard argument.
NonStandardArgKind kind; // The kind of the non-standard arg
};
ArrayStack<NonStandardArg> args;
public:
NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments
{
}
//-----------------------------------------------------------------------------
// Add: add a non-standard argument to the table of non-standard arguments
//
// Arguments:
// node - a GenTree node that has a non-standard argument.
// reg - the register to assign to this node.
//
// Return Value:
// None.
//
void Add(GenTree* node, regNumber reg, NonStandardArgKind kind)
{
NonStandardArg nsa = {node, reg, kind};
args.Push(nsa);
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree* in the set of non-standard args.
//
// Arguments:
// node - a GenTree node to look for
//
// Return Value:
// The index of the non-standard argument (a non-negative, unique, stable number).
// If the node is not a non-standard argument, return -1.
//
int Find(GenTree* node)
{
for (int i = 0; i < args.Height(); i++)
{
if (node == args.Top(i).node)
{
return i;
}
}
return -1;
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree node in the non-standard arguments set. If found,
// set the register to use for the node.
//
// Arguments:
// node - a GenTree node to look for
// pReg - an OUT argument. *pReg is set to the non-standard register to use if
// 'node' is found in the non-standard argument set.
// pKind - an OUT argument. *pKind is set to the kind of the non-standard arg.
//
// Return Value:
// 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set.
// 'false' otherwise (in this case, *pReg and *pKind are unmodified).
//
bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind)
{
for (int i = 0; i < args.Height(); i++)
{
NonStandardArg& nsa = args.TopRef(i);
if (node == nsa.node)
{
*pReg = nsa.reg;
*pKind = nsa.kind;
return true;
}
}
return false;
}
//-----------------------------------------------------------------------------
// Replace: Replace the non-standard argument node at a given index. This is done when
// the original node was replaced via morphing, but we need to continue to assign a
// particular non-standard arg to it.
//
// Arguments:
// index - the index of the non-standard arg. It must exist.
// node - the new GenTree node.
//
// Return Value:
// None.
//
void Replace(int index, GenTree* node)
{
args.TopRef(index).node = node;
}
} nonStandardArgs(getAllocator(CMK_ArrayStack));
// Count of args. On first morph, this is counted before we've filled in the arg table.
// On remorph, we grab it from the arg table.
unsigned numArgs = 0;
// First we need to count the args
if (call->gtCallThisArg != nullptr)
{
numArgs++;
}
for (GenTreeCall::Use& use : call->Args())
{
numArgs++;
}
// Insert or mark non-standard args. These are either outside the normal calling convention, or
// arguments registers that don't follow the normal progression of argument registers in the calling
// convention (such as for the ARM64 fixed return buffer argument x8).
//
// *********** NOTE *************
// The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments
// in the implementation of fast tail call.
// *********** END NOTE *********
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86) || defined(TARGET_ARM)
// The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helpers have a custom calling convention.
// Set the argument registers correctly here.
if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame);
}
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
#if defined(TARGET_ARM)
// A non-standard calling convention using wrapper delegate invoke is used on ARM only, for wrapper
// delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing
// R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs
// to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4
// correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub)
// to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details.
else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
GenTree* arg = call->gtCallThisArg->GetNode();
if (arg->OperIsLocal())
{
arg = gtClone(arg, true);
}
else
{
GenTree* tmp = fgInsertCommaFormTemp(&arg);
call->gtCallThisArg->SetNode(arg);
call->gtFlags |= GTF_ASG;
arg = tmp;
}
noway_assert(arg != nullptr);
GenTree* newArg = new (this, GT_ADDR)
GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell);
// Append newArg as the last arg
GenTreeCall::Use** insertionPoint = &call->gtCallArgs;
for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef()))
{
}
*insertionPoint = gtNewCallArgs(newArg);
numArgs++;
nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell);
}
#endif // defined(TARGET_ARM)
#if defined(TARGET_X86)
// The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the
// hi part to be in EDX. This sets the argument registers up correctly.
else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) ||
call->IsHelperCall(this, CORINFO_HELP_LRSZ))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow);
args = args->GetNext();
GenTree* arg2 = args->GetNode();
assert(arg2 != nullptr);
nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh);
}
#else // !TARGET_X86
// TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed.
// If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling
// convention for x86/SSE.
// If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it.
//
// We don't use the fixed return buffer argument if we have the special unmanaged instance call convention.
// That convention doesn't use the fixed return buffer register.
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->HasFixedRetBufArg())
{
args = call->gtCallArgs;
assert(args != nullptr);
argx = call->gtCallArgs->GetNode();
// We don't increment numArgs here, since we already counted this argument above.
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer);
}
// We are allowed to have a Fixed Return Buffer argument combined
// with any of the remaining non-standard arguments
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->IsVirtualStub())
{
if (!call->IsTailCallViaJitHelper())
{
GenTree* stubAddrArg = fgGetStubAddrArg(call);
// And push the stub address onto the list of arguments
call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell);
}
else
{
// If it is a VSD call getting dispatched via tail call helper,
// fgMorphTailCallViaJitHelper() would materialize stub addr as an additional
// parameter added to the original arg list and hence no need to
// add as a non-standard arg.
}
}
else
#endif // !TARGET_X86
if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr))
{
assert(!call->IsUnmanaged());
GenTree* arg = call->gtCallCookie;
noway_assert(arg != nullptr);
call->gtCallCookie = nullptr;
// All architectures pass the cookie in a register.
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie);
numArgs++;
// put destination into R10/EAX
arg = gtClone(call->gtCallAddr, true);
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget);
// finally change this call to a helper call
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI);
}
#if defined(FEATURE_READYTORUN)
// For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg()
// for indirection cell address, which ZapIndirectHelperThunk expects.
// For x64/x86 we use return address to get the indirection cell by disassembling the call site.
// That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch.
// Note that we call this before we know if something will be a fast tailcall or not.
// That's ok; after making something a tailcall, we will invalidate this information
// and reconstruct it if necessary. The tailcalling decision does not change since
// this is a non-standard arg in a register.
bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke();
#if defined(TARGET_XARCH)
needsIndirectionCell &= call->IsFastTailCall();
#endif
if (needsIndirectionCell)
{
assert(call->gtEntryPoint.addr != nullptr);
size_t addrValue = (size_t)call->gtEntryPoint.addr;
GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM);
#ifdef TARGET_ARM
// Issue #xxxx : Don't attempt to CSE this constant on ARM32
//
// This constant has specific register requirements, and LSRA doesn't currently correctly
// handle them when the value is in a CSE'd local.
indirectCellAddress->SetDoNotCSE();
#endif // TARGET_ARM
// Push the stub address onto the list of arguments.
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(),
NonStandardArgKind::R2RIndirectionCell);
}
#endif
if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
assert(call->gtCallArgs != nullptr);
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* tar = args->GetNode();
nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget);
}
// Allocate the fgArgInfo for the call node;
//
call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs);
// Add the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
assert(argIndex == 0);
assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT);
assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL));
const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum);
const unsigned numRegs = 1;
const unsigned byteSize = TARGET_POINTER_SIZE;
const unsigned byteAlignment = TARGET_POINTER_SIZE;
const bool isStruct = false;
const bool isFloatHfa = false;
// This is a register argument - put it in the table.
call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment,
isStruct, isFloatHfa,
callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0)
UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr));
intArgRegNum++;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass an integer register argument
// we skip the corresponding floating point register argument
fltArgRegNum++;
#endif // WINDOWS_AMD64_ABI
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
#ifdef TARGET_X86
// Compute the maximum number of arguments that can be passed in registers.
// For X86 we handle the varargs and unmanaged calling conventions
#ifndef UNIX_X86_ABI
if (call->gtFlags & GTF_CALL_POP_ARGS)
{
noway_assert(intArgRegNum < MAX_REG_ARG);
// No more register arguments for varargs (CALL_POP_ARGS)
maxRegArgs = intArgRegNum;
// Add in the ret buff arg
if (callHasRetBuffArg)
maxRegArgs++;
}
#endif // UNIX_X86_ABI
if (call->IsUnmanaged())
{
noway_assert(intArgRegNum == 0);
if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL ||
call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF ||
call->gtCallArgs->GetNode()->gtOper ==
GT_NOP); // the arg was already morphed to a register (fgMorph called twice)
maxRegArgs = 1;
}
else
{
maxRegArgs = 0;
}
#ifdef UNIX_X86_ABI
// Add in the ret buff arg
if (callHasRetBuffArg &&
call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not
call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments.
maxRegArgs++;
#endif
}
#endif // TARGET_X86
/* Morph the user arguments */
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
// The ARM ABI has a concept of back-filling of floating-point argument registers, according
// to the "Procedure Call Standard for the ARM Architecture" document, especially
// section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can
// appear in a lower-numbered register than floating point argument N. That is, argument
// register allocation is not strictly increasing. To support this, we need to keep track of unused
// floating-point argument registers that we can back-fill. We only support 4-byte float and
// 8-byte double types, and one to four element HFAs composed of these types. With this, we will
// only back-fill single registers, since there is no way with these types to create
// an alignment hole greater than one register. However, there can be up to 3 back-fill slots
// available (with 16 FP argument registers). Consider this code:
//
// struct HFA { float x, y, z; }; // a three element HFA
// void bar(float a1, // passed in f0
// double a2, // passed in f2/f3; skip f1 for alignment
// HFA a3, // passed in f4/f5/f6
// double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot
// HFA a5, // passed in f10/f11/f12
// double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill
// // slots
// float a7, // passed in f1 (back-filled)
// float a8, // passed in f7 (back-filled)
// float a9, // passed in f13 (back-filled)
// float a10) // passed on the stack in [OutArg+0]
//
// Note that if we ever support FP types with larger alignment requirements, then there could
// be more than single register back-fills.
//
// Once we assign a floating-pointer register to the stack, they all must be on the stack.
// See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling
// continues only so long as no VFP CPRC has been allocated to a slot on the stack."
// We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack
// and prevent any additional floating-point arguments from going in registers.
bool anyFloatStackArgs = false;
#endif // TARGET_ARM
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG)
// Check that we have valid information about call's argument types.
// For example:
// load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte));
// load int; call(byte) -> CALL(PUTARG_TYPE int (IND int));
// etc.
if (call->callSig != nullptr)
{
CORINFO_SIG_INFO* sig = call->callSig;
const unsigned sigArgsCount = sig->numArgs;
GenTreeCall::Use* nodeArgs = call->gtCallArgs;
// It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie
// etc.
unsigned nodeArgsCount = 0;
call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult {
nodeArgsCount++;
return GenTree::VisitResult::Continue;
});
if (call->gtCallThisArg != nullptr)
{
// Handle the most common argument not in `sig->numArgs`,
// so the following check works on more methods.
nodeArgsCount--;
}
assert(nodeArgsCount >= sigArgsCount);
if ((nodeArgsCount == sigArgsCount) &&
((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1)))
{
CORINFO_ARG_LIST_HANDLE sigArg = sig->args;
for (unsigned i = 0; i < sig->numArgs; ++i)
{
CORINFO_CLASS_HANDLE argClass;
const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass));
const var_types sigType = JITtype2varType(corType);
assert(nodeArgs != nullptr);
const GenTree* nodeArg = nodeArgs->GetNode();
assert(nodeArg != nullptr);
const var_types nodeType = nodeArg->TypeGet();
assert((nodeType == sigType) || varTypeIsStruct(sigType) ||
genTypeSize(nodeType) == genTypeSize(sigType));
sigArg = info.compCompHnd->getArgNext(sigArg);
nodeArgs = nodeArgs->GetNext();
}
assert(nodeArgs == nullptr);
}
}
#endif // DEBUG
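// Process each user argument: compute its size and alignment, classify it as a
// register or stack argument for the target ABI, and record the result in the
// fgArgInfo table.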
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
argx = args->GetNode()->gtSkipPutArgType();
// Change the node to TYP_I_IMPL so we don't report GC info
// NOTE: We deferred this from the importer because of the inliner.
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// We should never have any ArgPlaceHolder nodes at this point.
assert(!argx->IsArgPlaceHolderNode());
// Setup any HFA information about 'argx'
bool isHfaArg = false;
var_types hfaType = TYP_UNDEF;
unsigned hfaSlots = 0;
bool passUsingFloatRegs;
unsigned argAlignBytes = TARGET_POINTER_SIZE;
unsigned size = 0;
unsigned byteSize = 0;
if (GlobalJitOptions::compFeatureHfa)
{
hfaType = GetHfaType(argx);
isHfaArg = varTypeIsValidHfaType(hfaType);
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
// Make sure for vararg methods isHfaArg is not true.
isHfaArg = callIsVararg ? false : isHfaArg;
}
#endif // defined(TARGET_ARM64)
if (isHfaArg)
{
isHfaArg = true;
hfaSlots = GetHfaCount(argx);
// If we have an HFA struct it's possible we transition from a method that originally
// only had integer types to now start having FP types. We have to communicate this
// through this flag since LSRA later on will use this flag to determine whether
// or not to track the FP register set.
//
compFloatingPointUsed = true;
}
}
const bool isFloatHfa = (hfaType == TYP_FLOAT);
#ifdef TARGET_ARM
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP;
bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG);
// We don't use the "size" return value from InferOpSizeAlign().
codeGen->InferOpSizeAlign(argx, &argAlignBytes);
argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE);
if (argAlignBytes == 2 * TARGET_POINTER_SIZE)
{
if (passUsingFloatRegs)
{
if (fltArgRegNum % 2 == 1)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
fltArgRegNum++;
}
}
else if (passUsingIntRegs)
{
if (intArgRegNum % 2 == 1)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
intArgRegNum++;
}
}
#if defined(DEBUG)
if (argSlots % 2 == 1)
{
argSlots++;
}
#endif
}
#elif defined(TARGET_ARM64)
assert(!callIsVararg || !isHfaArg);
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx));
#elif defined(TARGET_AMD64)
passUsingFloatRegs = varTypeIsFloating(argx);
#elif defined(TARGET_X86)
passUsingFloatRegs = false;
#elif defined(TARGET_LOONGARCH64)
assert(!callIsVararg && !isHfaArg);
passUsingFloatRegs = varTypeUsesFloatReg(argx);
DWORD floatFieldFlags = STRUCT_NO_FLOAT_FIELD;
#else
#error Unsupported or unset target architecture
#endif // TARGET*
bool isBackFilled = false;
unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
var_types structBaseType = TYP_STRUCT;
unsigned structSize = 0;
bool passStructByRef = false;
bool isStructArg;
GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */);
//
// Figure out the size of the argument. This is either in number of registers, or number of
// TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and
// the stack.
//
isStructArg = varTypeIsStruct(argx);
CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE;
if (isStructArg)
{
objClass = gtGetStructHandle(argx);
if (argx->TypeGet() == TYP_STRUCT)
{
// For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY
switch (actualArg->OperGet())
{
case GT_OBJ:
structSize = actualArg->AsObj()->GetLayout()->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
break;
case GT_LCL_VAR:
structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize;
break;
case GT_MKREFANY:
structSize = info.compCompHnd->getClassSize(objClass);
break;
default:
BADCODE("illegal argument tree in fgInitArgInfo");
break;
}
}
else
{
structSize = genTypeSize(argx);
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
}
#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
if (!isStructArg)
{
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
else
{
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
}
#else // !UNIX_AMD64_ABI
size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot'
if (!isStructArg)
{
byteSize = genTypeSize(argx);
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
if (isStructArg)
{
if (isHfaArg)
{
// HFA structs are passed by value in multiple registers.
// The "size" in registers may differ the size in pointer-sized units.
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx);
size = GetHfaCount(structHnd);
byteSize = info.compCompHnd->getClassSize(structHnd);
}
else
{
// Structs are either passed in 1 or 2 (64-bit) slots.
// Structs that are the size of 2 pointers are passed by value in multiple registers,
// if sufficient registers are available.
// Structs that are larger than 2 pointers (except for HFAs) are passed by
// reference (to a copy)
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
if (size > 2)
{
size = 1;
}
}
// Note that there are some additional rules for multireg structs.
// (i.e they cannot be split between registers and the stack)
}
else
{
size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
#elif defined(TARGET_ARM) || defined(TARGET_X86)
if (isStructArg)
{
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
}
else
{
// The typical case.
// Long/double type argument(s) will be modified as needed in Lowering.
size = genTypeStSz(argx->gtType);
byteSize = genTypeSize(argx);
}
#else
#error Unsupported or unset target architecture
#endif // TARGET_XXX
if (isStructArg)
{
assert(argx == args->GetNode());
assert(structSize != 0);
structPassingKind howToPassStruct;
structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize);
passStructByRef = (howToPassStruct == SPK_ByReference);
#if defined(TARGET_LOONGARCH64)
if (!passStructByRef)
{
assert((howToPassStruct == SPK_ByValue) || (howToPassStruct == SPK_PrimitiveType));
floatFieldFlags = info.compCompHnd->getLoongArch64PassStructInRegisterFlags(objClass);
passUsingFloatRegs = (floatFieldFlags & STRUCT_HAS_FLOAT_FIELDS_MASK) ? true : false;
compFloatingPointUsed |= passUsingFloatRegs;
if ((floatFieldFlags & (STRUCT_HAS_FLOAT_FIELDS_MASK ^ STRUCT_FLOAT_FIELD_ONLY_ONE)) != 0)
{
// On LoongArch64, "getPrimitiveTypeForStruct" will incorrectly return "TYP_LONG"
// for "struct { float, float }", and retyping to a primitive here will cause the
// multi-reg morphing to not kick in (the struct in question needs to be passed in
// two FP registers). Here we just keep "structBaseType" as "TYP_STRUCT".
// TODO-LoongArch64: fix "getPrimitiveTypeForStruct" or use the ABI information in
// the arg entry instead of calling it here.
structBaseType = TYP_STRUCT;
}
if ((floatFieldFlags & (STRUCT_HAS_FLOAT_FIELDS_MASK ^ STRUCT_FLOAT_FIELD_ONLY_TWO)) != 0)
{
size = 1;
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
{
size = 2;
}
}
else // if (passStructByRef)
{
size = 1;
byteSize = TARGET_POINTER_SIZE;
}
#else
if (howToPassStruct == SPK_ByReference)
{
byteSize = TARGET_POINTER_SIZE;
}
else
{
byteSize = structSize;
}
if (howToPassStruct == SPK_PrimitiveType)
{
#ifdef TARGET_ARM
// TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct,
// or for a struct of two floats. This causes the struct to be address-taken.
if (structBaseType == TYP_DOUBLE)
{
size = 2;
}
else
#endif // TARGET_ARM
{
size = 1;
}
}
else if (passStructByRef)
{
size = 1;
}
#endif
}
const var_types argType = args->GetNode()->TypeGet();
if (args->GetNode()->OperIs(GT_PUTARG_TYPE))
{
byteSize = genTypeSize(argType);
}
// The 'size' value must now have been set. (The original value of zero is an invalid value.)
assert(size != 0);
assert(byteSize != 0);
if (compMacOsArm64Abi())
{
// Arm64 Apple has a special ABI for passing small-size arguments on the stack:
// bytes are aligned to 1 byte, shorts to 2 bytes, int/float to 4 bytes, etc.
// It means passing 8 1-byte arguments on the stack can take as little as 8 bytes.
argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa);
}
#ifdef TARGET_LOONGARCH64
regNumber nextOtherRegNum = REG_STK;
#endif
//
// Figure out if the argument will be passed in a register.
//
bool isRegArg = false;
NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None;
regNumber nonStdRegNum = REG_NA;
if (isRegParamType(genActualType(argx->TypeGet()))
#ifdef UNIX_AMD64_ABI
&& (!isStructArg || structDesc.passedInRegisters)
#elif defined(TARGET_X86)
|| (isStructArg && isTrivialPointerSizedStruct(objClass))
#endif
)
{
#ifdef TARGET_ARM
if (passUsingFloatRegs)
{
// First, see if it can be back-filled
if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
(fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
(size == 1)) // The size to back-fill is one float register
{
// Back-fill the register.
isBackFilled = true;
regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
fltArgSkippedRegMask &=
~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG);
}
// Does the entire float, double, or HFA fit in the FP arg registers?
// Check if the last register needed is still in the argument register range.
isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG;
if (!isRegArg)
{
anyFloatStackArgs = true;
}
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
#elif defined(TARGET_ARM64)
if (passUsingFloatRegs)
{
// Check if the last register needed is still in the fp argument register range.
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG;
// Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers?
if (isHfaArg && !isRegArg)
{
// recompute the 'size' so that it represents the number of stack slots rather than the number of
// registers
//
unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
size = roundupSize / TARGET_POINTER_SIZE;
// We also must update fltArgRegNum so that we no longer try to
// allocate any new floating point registers for args
// This prevents us from backfilling a subsequent arg into d7
//
fltArgRegNum = MAX_FLOAT_REG_ARG;
}
}
else
{
// Check if the last register needed is still in the int argument register range.
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
// Did we run out of registers when we had a 16-byte struct (size == 2)?
// (i.e. we only have one register remaining but we needed two registers to pass this arg)
// This prevents us from backfilling a subsequent arg into x7
//
if (!isRegArg && (size > 1))
{
                    // Arm64 Windows native varargs allow splitting a 16-byte struct between the stack
                    // and the last general purpose register.
if (TargetOS::IsWindows && callIsVararg)
{
// Override the decision and force a split.
isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs;
}
else
{
// We also must update intArgRegNum so that we no longer try to
// allocate any new general purpose registers for args
//
intArgRegNum = maxRegArgs;
}
}
}
#elif defined(TARGET_LOONGARCH64)
if (passUsingFloatRegs)
{
// Check if the last register needed is still in the fp argument register range.
passUsingFloatRegs = isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG;
if (isStructArg)
{
if ((floatFieldFlags & (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_SECOND)) &&
passUsingFloatRegs)
{
passUsingFloatRegs = isRegArg = intArgRegNum < maxRegArgs;
}
if (!passUsingFloatRegs)
{
size = structSize > 8 ? 2 : 1;
floatFieldFlags = 0;
}
else if (passUsingFloatRegs)
{
if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
{
nextOtherRegNum = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + 1);
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_SECOND) != 0)
{
assert(size == 1);
size = 2;
passUsingFloatRegs = false;
nextOtherRegNum = genMapFloatRegArgNumToRegNum(nextFltArgRegNum);
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_FIRST) != 0)
{
assert(size == 1);
size = 2;
nextOtherRegNum = genMapIntRegArgNumToRegNum(intArgRegNum);
}
}
}
assert(!isHfaArg); // LoongArch64 does not support HFA.
}
// if we run out of floating-point argument registers, try the int argument registers.
if (!isRegArg)
{
// Check if the last register needed is still in the int argument register range.
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
if (!passUsingFloatRegs && isRegArg && (size > 1))
{
nextOtherRegNum = genMapIntRegArgNumToRegNum(intArgRegNum + 1);
}
                    // Did we run out of registers when we had a 16-byte struct (size == 2)?
                    // (i.e. we only have one register remaining but we needed two registers to pass this arg)
//
if (!isRegArg && (size > 1))
{
// We also must update intArgRegNum so that we no longer try to
// allocate any new general purpose registers for args
//
isRegArg = intArgRegNum < maxRegArgs; // the split-struct case.
nextOtherRegNum = REG_STK;
}
}
#else // not TARGET_ARM or TARGET_ARM64 or TARGET_LOONGARCH64
#if defined(UNIX_AMD64_ABI)
// Here a struct can be passed in register following the classifications of its members and size.
// Now make sure there are actually enough registers to do so.
if (isStructArg)
{
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
structIntRegs++;
}
else if (structDesc.IsSseSlot(i))
{
structFloatRegs++;
}
}
isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) &&
((intArgRegNum + structIntRegs) <= MAX_REG_ARG);
}
else
{
if (passUsingFloatRegs)
{
isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG;
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
}
#else // !defined(UNIX_AMD64_ABI)
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
#endif // !defined(UNIX_AMD64_ABI)
#endif // TARGET_ARM
}
else
{
isRegArg = false;
}
        // If there are nonstandard args (outside the calling convention), they were inserted above
        // and noted in a table so we can recognize them here and build their argInfo.
//
// They should not affect the placement of any other args or stack space required.
// Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls.
bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind);
if (isNonStandard)
{
isRegArg = (nonStdRegNum != REG_STK);
}
else if (call->IsTailCallViaJitHelper())
{
// We have already (before calling fgMorphArgs()) appended the 4 special args
// required by the x86 tailcall helper. These args are required to go on the
// stack. Force them to the stack here.
assert(numArgs >= 4);
if (argIndex >= numArgs - 4)
{
isRegArg = false;
}
}
// Now we know if the argument goes in registers or not and how big it is.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
// If we ever allocate a floating point argument to the stack, then all
// subsequent HFA/float/double arguments go on the stack.
if (!isRegArg && passUsingFloatRegs)
{
for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
}
}
// If we think we're going to split a struct between integer registers and the stack, check to
// see if we've already assigned a floating-point arg to the stack.
if (isRegArg && // We decided above to use a register for the argument
!passUsingFloatRegs && // We're using integer registers
(intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack
anyFloatStackArgs) // We've already used the stack for a floating-point argument
{
isRegArg = false; // Change our mind; don't pass this struct partially in registers
// Skip the rest of the integer argument registers
for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
}
}
#endif // TARGET_ARM
// Now create the fgArgTabEntry.
fgArgTabEntry* newArgEntry;
if (isRegArg)
{
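            // Determine the register (or registers) that will hold this argument.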
regNumber nextRegNum = REG_STK;
#if defined(UNIX_AMD64_ABI)
regNumber nextOtherRegNum = REG_STK;
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
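            // Non-standard args are passed in the specific register recorded for them earlier.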
if (isNonStandard)
{
nextRegNum = nonStdRegNum;
}
#if defined(UNIX_AMD64_ABI)
else if (isStructArg && structDesc.passedInRegisters)
{
// It is a struct passed in registers. Assign the next available register.
assert((structDesc.eightByteCount <= 2) && "Too many eightbytes.");
regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum};
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
*nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs);
++structIntRegs;
}
else if (structDesc.IsSseSlot(i))
{
*nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs);
++structFloatRegs;
}
}
}
#endif // defined(UNIX_AMD64_ABI)
else
{
// fill in or update the argInfo table
nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum)
: genMapIntRegArgNumToRegNum(intArgRegNum);
}
#ifdef TARGET_AMD64
#ifndef UNIX_AMD64_ABI
assert(size == 1);
#endif
#endif
// This is a register argument - put it in the table
newArgEntry =
call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg UNIX_LOONGARCH64_ONLY_ARG(nextOtherRegNum)
UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum)
UNIX_AMD64_ABI_ONLY_ARG(structIntRegs)
UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs)
UNIX_AMD64_ABI_ONLY_ARG(&structDesc));
newArgEntry->SetIsBackFilled(isBackFilled);
// Set up the next intArgRegNum and fltArgRegNum values.
if (!isBackFilled)
{
#if defined(TARGET_LOONGARCH64)
// Increment intArgRegNum by 'size' registers
if (!isNonStandard)
{
if ((size > 1) && ((intArgRegNum + 1) == maxRegArgs) && (nextOtherRegNum == REG_STK))
{
assert(!passUsingFloatRegs);
assert(size == 2);
intArgRegNum = maxRegArgs;
}
else if ((floatFieldFlags & STRUCT_HAS_FLOAT_FIELDS_MASK) == 0x0)
{
if (passUsingFloatRegs)
{
fltArgRegNum += 1;
}
else
{
intArgRegNum += size;
}
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0)
{
structBaseType = structSize == 8 ? TYP_DOUBLE : TYP_FLOAT;
fltArgRegNum += 1;
newArgEntry->structFloatFieldType[0] = structBaseType;
}
else if ((floatFieldFlags & (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_SECOND)) != 0)
{
fltArgRegNum += 1;
intArgRegNum += 1;
if ((floatFieldFlags & STRUCT_FLOAT_FIELD_FIRST) != 0)
{
newArgEntry->structFloatFieldType[0] =
(floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
newArgEntry->structFloatFieldType[1] =
(floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
}
else
{
newArgEntry->structFloatFieldType[0] =
(floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
newArgEntry->structFloatFieldType[1] =
(floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
}
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
{
fltArgRegNum += 2;
newArgEntry->structFloatFieldType[0] =
(floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
newArgEntry->structFloatFieldType[1] =
(floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
}
}
#else
#if defined(UNIX_AMD64_ABI)
if (isStructArg)
{
// For this case, we've already set the regNums in the argTabEntry
intArgRegNum += structIntRegs;
fltArgRegNum += structFloatRegs;
}
else
#endif // defined(UNIX_AMD64_ABI)
{
if (!isNonStandard)
{
#if FEATURE_ARG_SPLIT
// Check for a split (partially enregistered) struct
if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG))
{
// This indicates a partial enregistration of a struct type
assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() ||
(argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG)));
unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum;
assert((unsigned char)numRegsPartial == numRegsPartial);
call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial);
}
#endif // FEATURE_ARG_SPLIT
if (passUsingFloatRegs)
{
fltArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
                        // On the Windows x64 ABI, whenever we pass an argument in a floating point register
                        // we also skip the corresponding integer argument register.
intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG);
#endif // WINDOWS_AMD64_ABI
// No supported architecture supports partial structs using float registers.
assert(fltArgRegNum <= MAX_FLOAT_REG_ARG);
}
else
{
// Increment intArgRegNum by 'size' registers
intArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
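                        // On the Windows x64 ABI, passing an argument in an integer register also
                        // consumes the corresponding floating point register.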
fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG);
#endif // WINDOWS_AMD64_ABI
}
}
}
#endif // defined(TARGET_LOONGARCH64)
}
}
else // We have an argument that is not passed in a register
{
// This is a stack argument - put it in the table
newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg);
#ifdef UNIX_AMD64_ABI
// TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs).
if (structDesc.passedInRegisters)
{
newArgEntry->structDesc.CopyFrom(structDesc);
}
#endif
}
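        // Record why (if at all) this argument is non-standard; NonStandardArgKind::None for ordinary args.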
newArgEntry->nonStandardArgKind = nonStandardArgKind;
if (GlobalJitOptions::compFeatureHfa)
{
if (isHfaArg)
{
newArgEntry->SetHfaType(hfaType, hfaSlots);
}
}
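        // Fill in the register numbers for each register of a multi-reg argument.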
newArgEntry->SetMultiRegNums();
noway_assert(newArgEntry != nullptr);
if (newArgEntry->isStruct)
{
newArgEntry->passedByRef = passStructByRef;
newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType;
}
else
{
newArgEntry->argType = argx->TypeGet();
}
DEBUG_ARG_SLOTS_ONLY(argSlots += size;)
} // end foreach argument loop
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
}
//------------------------------------------------------------------------
// fgMorphArgs: Walk and transform (morph) the arguments of a call
//
// Arguments:
// callNode - the call for which we are doing the argument morphing
//
// Return Value:
// Like most morph methods, this method returns the morphed node,
// though in this case there are currently no scenarios where the
// node itself is re-created.
//
// Notes:
// This calls fgInitArgInfo to create the 'fgArgInfo' for the call.
// If it has already been created, that method will simply return.
//
// This method changes the state of the call node. It uses the existence
// of gtCallLateArgs (the late arguments list) to determine if it has
// already done the first round of morphing.
//
// The first time it is called (i.e. during global morphing), this method
// computes the "late arguments". This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, are doing the in-order
// evaluation of the arguments that might have side-effects, such as embedded
// assignments, calls or possible throws. In these cases, it and earlier
// arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
GenTreeFlags flagsSummary = GTF_EMPTY;
unsigned argIndex = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool reMorphing = call->AreArgsComplete();
// Set up the fgArgInfo.
fgInitArgInfo(call);
JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));
// If we are remorphing, process the late arguments (which were determined by a previous caller).
if (reMorphing)
{
for (GenTreeCall::Use& use : call->LateArgs())
{
use.SetNode(fgMorphTree(use.GetNode()));
flagsSummary |= use.GetNode()->gtFlags;
}
assert(call->fgArgInfo != nullptr);
}
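    // Reset the arg table's per-walk state before the loop below (re)processes each argument.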
call->fgArgInfo->RemorphReset();
// First we morph the argument subtrees ('this' pointer, arguments, etc.).
// During the first call to fgMorphArgs we also record the
// information about late arguments we have in 'fgArgInfo'.
    // This information is used later to construct the gtCallLateArgs.
// Process the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
argx = fgMorphTree(argx);
call->gtCallThisArg->SetNode(argx);
// This is a register argument - possibly update it in the table.
call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
{
if (!argx->OperIsLocal())
{
thisArgEntry->needTmp = true;
call->fgArgInfo->SetNeedsTemps();
}
}
assert(argIndex == 0);
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
// Note that this name is a bit of a misnomer - it indicates that there are struct args
// that occupy more than a single slot that are passed by value (not necessarily in regs).
bool hasMultiregStructArgs = false;
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
GenTree** parentArgx = &args->NodeRef();
fgArgTabEntry* argEntry = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);
// Morph the arg node, and update the parent and argEntry pointers.
argx = *parentArgx;
argx = fgMorphTree(argx);
*parentArgx = argx;
assert(argx == args->GetNode());
DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();)
CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE)
{
if (argSlots % 2 == 1)
{
argSlots++;
}
}
}
#endif // DEBUG_ARG_SLOTS
if (argEntry->isNonStandard() && argEntry->isPassedInRegisters())
{
// We need to update the node field for this nonStandard arg here
// as it may have been changed by the call to fgMorphTree.
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
continue;
}
DEBUG_ARG_SLOTS_ASSERT(size != 0);
DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();)
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// Get information about this argument.
var_types hfaType = argEntry->GetHfaType();
bool isHfaArg = (hfaType != TYP_UNDEF);
bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters();
unsigned structSize = 0;
// Struct arguments may be morphed into a node that is not a struct type.
        // In such cases the fgArgTabEntry keeps track of whether the original node (before morphing)
// was a struct and the struct classification.
bool isStructArg = argEntry->isStruct;
GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/);
if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE))
{
CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj);
unsigned originalSize;
if (argObj->TypeGet() == TYP_STRUCT)
{
if (argObj->OperIs(GT_OBJ))
{
// Get the size off the OBJ node.
originalSize = argObj->AsObj()->GetLayout()->GetSize();
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
else
{
// We have a BADCODE assert for this in fgInitArgInfo.
assert(argObj->OperIs(GT_LCL_VAR));
originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize;
}
}
else
{
originalSize = genTypeSize(argx);
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
var_types structBaseType = argEntry->argType;
// First, handle the case where the argument is passed by reference.
if (argEntry->passedByRef)
{
DEBUG_ARG_SLOTS_ASSERT(size == 1);
copyBlkClass = objClass;
#ifdef UNIX_AMD64_ABI
assert(!"Structs are not passed by reference on x64/ux");
#endif // UNIX_AMD64_ABI
}
else // This is passed by value.
{
#if defined(TARGET_LOONGARCH64)
                // For LoongArch64 the struct {float a; float b;} can be passed in two float registers.
DEBUG_ARG_SLOTS_ASSERT((size == roundupSize / TARGET_POINTER_SIZE) ||
((structBaseType == TYP_STRUCT) && (originalSize == TARGET_POINTER_SIZE) &&
(size == 2) && (size == argEntry->numRegs)));
#else
// Check to see if we can transform this into load of a primitive type.
// 'size' must be the number of pointer sized items
DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE);
#endif
structSize = originalSize;
unsigned passingSize = originalSize;
// Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size.
// When it can do this is platform-dependent:
// - In general, it can be done for power of 2 structs that fit in a single register.
// - For ARM and ARM64 it must also be a non-HFA struct, or have a single field.
// - This is irrelevant for X86, since structs are always passed by value on the stack.
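                // See if the arg is an indirection of a local's address (e.g. OBJ(ADDR(LCL_VAR)));
                // if so, 'lclVar' is that local.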
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj);
bool canTransform = false;
if (structBaseType != TYP_STRUCT)
{
if (isPow2(passingSize))
{
canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType())));
}
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
// For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can
// only transform in that case if the arg is a local.
// TODO-CQ: This transformation should be applicable in general, not just for the ARM64
// or UNIX_AMD64_ABI cases where they will be passed in registers.
else
{
canTransform = (lclVar != nullptr);
passingSize = genTypeSize(structBaseType);
}
#endif // TARGET_ARM64 || UNIX_AMD64_ABI || TARGET_LOONGARCH64
}
if (!canTransform)
{
#if defined(TARGET_AMD64)
#ifndef UNIX_AMD64_ABI
// On Windows structs are always copied and passed by reference (handled above) unless they are
// passed by value in a single register.
assert(size == 1);
copyBlkClass = objClass;
#else // UNIX_AMD64_ABI
// On Unix, structs are always passed by value.
// We only need a copy if we have one of the following:
// - The sizes don't match for a non-lclVar argument.
// - We have a known struct type (e.g. SIMD) that requires multiple registers.
// TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not
// actually passed in registers.
if (argEntry->isPassedInRegisters())
{
if (argObj->OperIs(GT_OBJ))
{
if (passingSize != structSize)
{
copyBlkClass = objClass;
}
}
else if (lclVar == nullptr)
{
// This should only be the case of a value directly producing a known struct type.
assert(argObj->TypeGet() != TYP_STRUCT);
if (argEntry->numRegs > 1)
{
copyBlkClass = objClass;
}
}
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
if ((passingSize != structSize) && (lclVar == nullptr))
{
copyBlkClass = objClass;
}
#endif
#ifdef TARGET_ARM
// TODO-1stClassStructs: Unify these conditions across targets.
if (((lclVar != nullptr) &&
(lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) ||
((argObj->OperIs(GT_OBJ)) && (passingSize != structSize)))
{
copyBlkClass = objClass;
}
if (structSize < TARGET_POINTER_SIZE)
{
copyBlkClass = objClass;
}
#endif // TARGET_ARM
}
else
{
// We have a struct argument that fits into a register, and it is either a power of 2,
// or a local.
// Change our argument, as needed, into a value of the appropriate type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2)));
#else
DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) &&
size == (genTypeSize(structBaseType) / REGSIZE_BYTES)));
#endif
assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize));
if (argObj->OperIs(GT_OBJ))
{
argObj->ChangeOper(GT_IND);
// Now see if we can fold *(&X) into X
if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR)
{
GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1;
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE);
DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(argObj); // GT_IND
argObj = temp;
*parentArgx = temp;
argx = temp;
}
}
if (argObj->gtOper == GT_LCL_VAR)
{
unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
if (varDsc->lvFieldCnt == 1)
{
// get the first and only promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize)
{
// we will use the first and only promoted field
argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart);
if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) &&
(genTypeSize(fieldVarDsc->TypeGet()) == originalSize))
{
// Just use the existing field's type
argObj->gtType = fieldVarDsc->TypeGet();
}
else
{
// Can't use the existing field's type, so use GT_LCL_FLD to swizzle
// to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()));
assert(copyBlkClass == NO_CLASS_HANDLE);
}
else
{
// use GT_LCL_FLD to swizzle the single field struct to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// The struct fits into a single register, but it has been promoted into its
// constituent fields, and so we have to re-assemble it
copyBlkClass = objClass;
}
}
else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType))
{
// Not a promoted struct, so just swizzle the type by using GT_LCL_FLD
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// Not a GT_LCL_VAR, so we can just change the type on the node
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()) ||
((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType)));
}
#if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64)
                // TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in
                // `genPutStructArgStk` for xarch like we did for Arm/Arm64.
// We still have a struct unless we converted the GT_OBJ into a GT_IND above...
if (isHfaArg && passUsingFloatRegs)
{
}
else if (structBaseType == TYP_STRUCT)
{
// If the valuetype size is not a multiple of TARGET_POINTER_SIZE,
// we must copyblk to a temp before doing the obj to avoid
// the obj reading memory past the end of the valuetype
CLANG_FORMAT_COMMENT_ANCHOR;
if (roundupSize > originalSize)
{
copyBlkClass = objClass;
// There are a few special cases where we can omit using a CopyBlk
// where we normally would need to use one.
if (argObj->OperIs(GT_OBJ) &&
argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar?
{
copyBlkClass = NO_CLASS_HANDLE;
}
}
}
#endif // !UNIX_AMD64_ABI && !TARGET_ARMARCH && !TARGET_LOONGARCH64
}
}
if (argEntry->isPassedInRegisters())
{
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
}
else
{
call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing);
}
if (copyBlkClass != NO_CLASS_HANDLE)
{
fgMakeOutgoingStructArgCopy(call, args, copyBlkClass);
}
if (argx->gtOper == GT_MKREFANY)
{
// 'Lower' the MKREFANY tree and insert it.
noway_assert(!reMorphing);
#ifdef TARGET_X86
// Build the mkrefany as a GT_FIELD_LIST
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF);
fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
#else // !TARGET_X86
// Get a new temp
            // Here we don't need the unsafe value class check, since the address of the temp is used only in the mkrefany.
unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument"));
lvaSetStruct(tmp, impGetRefAnyClass(), false);
// Build the mkrefany as a comma node:
// (tmp.ptr=argx),(tmp.type=handle)
GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr);
GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type);
destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
destPtrSlot->gtFlags |= GTF_VAR_DEF;
destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()));
destTypeSlot->gtFlags |= GTF_VAR_DEF;
GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1);
GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2);
GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot);
// Change the expression to "(tmp=val)"
args->SetNode(asg);
// EvalArgsToTemps will cause tmp to actually get loaded as the argument
call->fgArgInfo->EvalToTmp(argEntry, tmp, asg);
lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE));
#endif // !TARGET_X86
}
#if FEATURE_MULTIREG_ARGS
if (isStructArg)
{
if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) ||
(isHfaArg && argx->TypeGet() == TYP_STRUCT))
{
hasMultiregStructArgs = true;
}
}
#ifdef TARGET_ARM
else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE))
{
assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2));
}
#endif
else
{
// We must have exactly one register or slot.
assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) ||
((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1)));
}
#endif
#if defined(TARGET_X86)
if (isStructArg)
{
GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx);
if ((lclNode != nullptr) &&
(lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
// Make a GT_FIELD_LIST of the field lclVars.
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon();
LclVarDsc* varDsc = lvaGetDesc(lcl);
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
for (unsigned fieldLclNum = varDsc->lvFieldLclStart;
fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* fieldLcl;
if (fieldLclNum == varDsc->lvFieldLclStart)
{
lcl->SetLclNum(fieldLclNum);
lcl->SetOperResetFlags(GT_LCL_VAR);
lcl->gtType = fieldVarDsc->TypeGet();
fieldLcl = lcl;
}
else
{
fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
}
fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
}
}
}
#endif // TARGET_X86
flagsSummary |= args->GetNode()->gtFlags;
} // end foreach argument loop
if (!reMorphing)
{
call->fgArgInfo->ArgsComplete();
}
/* Process the function address, if indirect call */
if (call->gtCallType == CT_INDIRECT)
{
call->gtCallAddr = fgMorphTree(call->gtCallAddr);
// Const CSE may create an assignment node here
flagsSummary |= call->gtCallAddr->gtFlags;
}
#if FEATURE_FIXED_OUT_ARGS
// Record the outgoing argument size. If the call is a fast tail
// call, it will setup its arguments in incoming arg area instead
// of the out-going arg area, so we don't need to track the
// outgoing arg size.
if (!call->IsFastTailCall())
{
#if defined(UNIX_AMD64_ABI)
// This is currently required for the UNIX ABI to work correctly.
opts.compNeedToAlignFrame = true;
#endif // UNIX_AMD64_ABI
const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset());
#if defined(DEBUG_ARG_SLOTS)
unsigned preallocatedArgCount = 0;
if (!compMacOsArm64Abi())
{
preallocatedArgCount = call->fgArgInfo->GetNextSlotNum();
assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES);
}
#endif
call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL));
#ifdef DEBUG
if (verbose)
{
const fgArgInfo* argInfo = call->fgArgInfo;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, "
"outgoingArgSpaceSize=%d\n",
argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
else
{
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
#else
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
#endif
}
#endif
}
#endif // FEATURE_FIXED_OUT_ARGS
// Clear the ASG and EXCEPT (if possible) flags on the call node
call->gtFlags &= ~GTF_ASG;
if (!call->OperMayThrow(this))
{
call->gtFlags &= ~GTF_EXCEPT;
}
// Union in the side effect flags from the call's operands
call->gtFlags |= flagsSummary & GTF_ALL_EFFECT;
// If we are remorphing or don't have any register arguments or other arguments that need
// temps, then we don't need to call SortArgs() and EvalArgsToTemps().
//
if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps()))
{
// Do the 'defer or eval to temp' analysis.
call->fgArgInfo->SortArgs();
call->fgArgInfo->EvalArgsToTemps();
}
if (hasMultiregStructArgs)
{
fgMorphMultiregStructArgs(call);
}
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
return call;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
// call fgMorphMultiregStructArg on each of them.
//
// Arguments:
//    call - a GenTreeCall node that has one or more TYP_STRUCT arguments.
//
// Notes:
// We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types.
// It will ensure that the struct arguments are in the correct form.
// If this method fails to find any TYP_STRUCT arguments it will assert.
//
void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
{
bool foundStructArg = false;
GenTreeFlags flagsSummary = GTF_EMPTY;
#ifdef TARGET_X86
assert(!"Logic error: no MultiregStructArgs for X86");
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI");
#endif
for (GenTreeCall::Use& use : call->Args())
{
// For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
        // For such late args the gtCallArgs list contains the setup arg node (which evaluates the arg).
        // The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping
        // between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself,
        // otherwise it points to the node in the late args list.
bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0;
fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode());
assert(fgEntryPtr != nullptr);
GenTree* argx = fgEntryPtr->GetNode();
GenTreeCall::Use* lateUse = nullptr;
GenTree* lateNode = nullptr;
if (isLateArg)
{
for (GenTreeCall::Use& lateArgUse : call->LateArgs())
{
GenTree* argNode = lateArgUse.GetNode();
if (argx == argNode)
{
lateUse = &lateArgUse;
lateNode = argNode;
break;
}
}
assert((lateUse != nullptr) && (lateNode != nullptr));
}
if (!fgEntryPtr->isStruct)
{
continue;
}
unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber());
if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT))
{
foundStructArg = true;
if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST))
{
if (fgEntryPtr->IsHfaRegArg())
{
var_types hfaType = fgEntryPtr->GetHfaType();
unsigned structSize;
if (argx->OperIs(GT_OBJ))
{
structSize = argx->AsObj()->GetLayout()->GetSize();
}
else if (varTypeIsSIMD(argx))
{
structSize = genTypeSize(argx);
}
else
{
assert(argx->OperIs(GT_LCL_VAR));
structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize;
}
assert(structSize > 0);
if (structSize == genTypeSize(hfaType))
{
if (argx->OperIs(GT_OBJ))
{
argx->SetOper(GT_IND);
}
argx->gtType = hfaType;
}
}
GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr);
// Did we replace 'argx' with a new tree?
if (newArgx != argx)
{
// link the new arg node into either the late arg list or the gtCallArgs list
if (isLateArg)
{
lateUse->SetNode(newArgx);
}
else
{
use.SetNode(newArgx);
}
assert(fgEntryPtr->GetNode() == newArgx);
}
}
}
}
// We should only call this method when we actually have one or more multireg struct args
assert(foundStructArg);
// Update the flags
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list,
// morph the argument as needed to be passed correctly.
//
// Arguments:
// arg - A GenTree node containing a TYP_STRUCT arg
// fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
// Notes:
// The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT.
// If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the
// stack are marked as doNotEnregister, and then we return.
//
// If it is passed by register, we mutate the argument into the GT_FIELD_LIST form
// which is only used for struct arguments.
//
// If arg is a LclVar we check if it is struct promoted and has the right number of fields
// and if they are at the appropriate offsets we will use the struct promoted fields
// in the GT_FIELD_LIST nodes that we create.
// If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements,
// we will use a set of GT_LCL_FLD nodes to access the various portions of the struct;
// this also forces the struct to be stack allocated into the local frame.
// For the GT_OBJ case we will clone the address expression and generate two (or more)
// indirections.
//
GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr)
{
assert(varTypeIsStruct(arg->TypeGet()));
#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI) && !defined(TARGET_LOONGARCH64)
NYI("fgMorphMultiregStructArg requires implementation for this target");
#endif
#ifdef TARGET_ARM
if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) ||
(!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK))
#else
if (fgEntryPtr->GetRegNum() == REG_STK)
#endif
{
GenTreeLclVarCommon* lcl = nullptr;
GenTree* actualArg = arg->gtEffectiveVal();
if (actualArg->OperGet() == GT_OBJ)
{
if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR))
{
lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon();
}
}
else if (actualArg->OperGet() == GT_LCL_VAR)
{
lcl = actualArg->AsLclVarCommon();
}
if (lcl != nullptr)
{
if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)
{
arg = fgMorphLclArgToFieldlist(lcl);
}
else if (arg->TypeGet() == TYP_STRUCT)
{
// If this is a non-register struct, it must be referenced from memory.
if (!actualArg->OperIs(GT_OBJ))
{
// Create an Obj of the temp to use it as a call argument.
arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg);
}
// Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg));
}
}
return arg;
}
#if FEATURE_MULTIREG_ARGS
// Examine 'arg' and setup argValue objClass and structSize
//
const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg);
GenTree* argValue = arg; // normally argValue will be arg, but see right below
unsigned structSize = 0;
if (arg->TypeGet() != TYP_STRUCT)
{
structSize = genTypeSize(arg->TypeGet());
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else if (arg->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = arg->AsObj();
const ClassLayout* objLayout = argObj->GetLayout();
structSize = objLayout->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
// If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR.
GenTree* op1 = argObj->gtOp1;
if (op1->OperGet() == GT_ADDR)
{
GenTree* underlyingTree = op1->AsOp()->gtOp1;
// Only update to the same type.
if (underlyingTree->OperIs(GT_LCL_VAR))
{
const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar());
if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout))
{
argValue = underlyingTree;
}
}
}
}
else if (arg->OperGet() == GT_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon());
structSize = varDsc->lvExactSize;
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else
{
structSize = info.compCompHnd->getClassSize(objClass);
}
var_types hfaType = TYP_UNDEF;
var_types elemType = TYP_UNDEF;
unsigned elemCount = 0;
unsigned elemSize = 0;
var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0
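    // 'type[]' holds the type to use for each register-sized piece of the struct.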
hfaType = fgEntryPtr->GetHfaType();
if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters())
{
elemType = hfaType;
elemSize = genTypeSize(elemType);
elemCount = structSize / elemSize;
assert(elemSize * elemCount == structSize);
for (unsigned inx = 0; inx < elemCount; inx++)
{
type[inx] = elemType;
}
}
else
{
assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE);
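        // Query the GC layout of the struct so that each register-sized slot gets the correct GC or non-GC type.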
BYTE gcPtrs[MAX_ARG_REG_COUNT];
info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
#ifdef TARGET_LOONGARCH64
        // For LoongArch64's ABI, a struct whose size is TARGET_POINTER_SIZE
        // may be passed in two registers,
        // e.g. `struct {int a; float b;}` is passed in an integer register and a float register.
if (fgEntryPtr->numRegs == 2)
{
elemCount = 2;
}
#endif
for (unsigned inx = 0; inx < elemCount; inx++)
{
#if defined(UNIX_AMD64_ABI)
if (gcPtrs[inx] == TYPE_GC_NONE)
{
type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx],
fgEntryPtr->structDesc.eightByteSizes[inx]);
}
else
#elif defined(TARGET_LOONGARCH64)
if (varTypeIsFloating(fgEntryPtr->structFloatFieldType[inx]) ||
(genTypeSize(fgEntryPtr->structFloatFieldType[inx]) == 4))
{
type[inx] = fgEntryPtr->structFloatFieldType[inx];
}
else
#endif // TARGET_LOONGARCH64
{
type[inx] = getJitGCType(gcPtrs[inx]);
}
}
#ifndef UNIX_AMD64_ABI
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
elemSize = TARGET_POINTER_SIZE;
// We can safely widen this to aligned bytes since we are loading from
// a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and
// lives in the stack frame or will be a promoted field.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_LOONGARCH64
            // Widen structSize to the padded size. We skip this for LoongArch64 because under its ABI
            // a struct whose size is TARGET_POINTER_SIZE may be passed in two registers,
            // e.g. `struct {int a; float b;}` is passed in an integer register and a float register.
            structSize = elemCount * TARGET_POINTER_SIZE;
#endif
}
else // we must have a GT_OBJ
{
assert(argValue->OperGet() == GT_OBJ);
// We need to load the struct from an arbitrary address
// and we can't read past the end of the structSize
// We adjust the last load type here
//
unsigned remainingBytes = structSize % TARGET_POINTER_SIZE;
unsigned lastElem = elemCount - 1;
if (remainingBytes != 0)
{
switch (remainingBytes)
{
case 1:
type[lastElem] = TYP_BYTE;
break;
case 2:
type[lastElem] = TYP_SHORT;
break;
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
case 4:
type[lastElem] = TYP_INT;
break;
#endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) || (TARGET_LOONGARCH64)
default:
noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg");
break;
}
}
}
#endif // !UNIX_AMD64_ABI
}
// We should still have a TYP_STRUCT
assert(varTypeIsStruct(argValue->TypeGet()));
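    // 'newArg' will hold the GT_FIELD_LIST built below; it remains nullptr if no field list is created here.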
GenTreeFieldList* newArg = nullptr;
// Are we passing a struct LclVar?
//
if (argValue->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
// At this point any TYP_STRUCT LclVar must be an aligned struct
        // or an HFA struct, both of which are passed by value.
//
assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa());
varDsc->lvIsMultiRegArg = true;
#ifdef DEBUG
if (verbose)
{
JITDUMP("Multireg struct argument V%02u : ", varNum);
fgEntryPtr->Dump();
}
#endif // DEBUG
#ifndef UNIX_AMD64_ABI
// This local variable must match the layout of the 'objClass' type exactly
if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
// We have a HFA struct.
noway_assert(elemType == varDsc->GetHfaType());
noway_assert(elemSize == genTypeSize(elemType));
noway_assert(elemCount == (varDsc->lvExactSize / elemSize));
noway_assert(elemSize * elemCount == varDsc->lvExactSize);
for (unsigned inx = 0; (inx < elemCount); inx++)
{
noway_assert(type[inx] == elemType);
}
}
else
{
#if defined(TARGET_ARM64)
// We must have a 16-byte struct (non-HFA)
noway_assert(elemCount == 2);
#elif defined(TARGET_ARM)
noway_assert(elemCount <= 4);
#endif
for (unsigned inx = 0; inx < elemCount; inx++)
{
var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx);
// We setup the type[inx] value above using the GC info from 'objClass'
// This GT_LCL_VAR must have the same GC layout info
//
if (varTypeIsGC(currentGcLayoutType))
{
noway_assert(type[inx] == currentGcLayoutType);
}
else
{
                    // We may have used a small type when we set up the type[inx] values above.
                    // We can safely widen this to TYP_I_IMPL.
type[inx] = TYP_I_IMPL;
}
}
}
if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
bool canMorphToFieldList = true;
for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize)
{
const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset);
if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum)))
{
canMorphToFieldList = false;
break;
}
}
if (canMorphToFieldList)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
else
#endif // !UNIX_AMD64_ABI
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
// Is this LclVar a promoted struct with exactly 2 fields?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa())
{
// See if we have two promoted fields that start at offset 0 and 8?
unsigned loVarNum = lvaGetFieldLocal(varDsc, 0);
unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE);
// Did we find the promoted fields at the necessary offsets?
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM))
{
LclVarDsc* loVarDsc = lvaGetDesc(loVarNum);
LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum);
var_types loType = loVarDsc->lvType;
var_types hiType = hiVarDsc->lvType;
if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) ||
(varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1))))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer
// registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered)
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
}
else
{
// We can use the struct promoted field as the two arguments
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr))
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType);
newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLDs nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#elif defined(TARGET_ARM)
// Is this LclVar a promoted struct with exactly same size?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa())
{
// See if we have promoted fields?
unsigned varNums[4];
bool hasBadVarNum = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx);
if (varNums[inx] == BAD_VAR_NUM)
{
hasBadVarNum = true;
break;
}
}
// Did we find the promoted fields at the necessary offsets?
if (!hasBadVarNum)
{
LclVarDsc* varDscs[4];
var_types varType[4];
bool varIsFloat = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varDscs[inx] = lvaGetDesc(varNums[inx]);
varType[inx] = varDscs[inx]->lvType;
if (varTypeIsFloating(varType[inx]))
{
                        // TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the
                        // integer registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be
                        // enregistered).
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
varIsFloat = true;
break;
}
}
if (!varIsFloat)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLDs nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#endif // TARGET_ARM
}
    // If we didn't set newArg to a new GT_FIELD_LIST tree
//
if (newArg == nullptr)
{
if (fgEntryPtr->GetRegNum() == REG_STK)
{
// We leave this stack passed argument alone
return arg;
}
        // Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted)?
        // A GT_LCL_FLD could also contain a 16-byte struct or an HFA struct inside it.
//
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
unsigned baseOffset = varNode->GetLclOffs();
unsigned lastOffset = baseOffset + structSize;
// The allocated size of our LocalVar must be at least as big as lastOffset
assert(varDsc->lvSize() >= lastOffset);
if (varDsc->HasGCPtr())
{
// alignment of the baseOffset is required
noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0);
#ifndef UNIX_AMD64_ABI
noway_assert(elemSize == TARGET_POINTER_SIZE);
#endif
unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
ClassLayout* layout = varDsc->GetLayout();
for (unsigned inx = 0; (inx < elemCount); inx++)
{
// The GC information must match what we setup using 'objClass'
if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx]))
{
noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx));
}
}
}
else // this varDsc contains no GC pointers
{
for (unsigned inx = 0; inx < elemCount; inx++)
{
// The GC information must match what we setup using 'objClass'
noway_assert(!varTypeIsGC(type[inx]));
}
}
//
// We create a list of GT_LCL_FLDs nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI)
//
unsigned offset = baseOffset;
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset);
newArg->AddField(this, nextLclFld, offset, type[inx]);
#ifdef TARGET_LOONGARCH64
if (structSize > TARGET_POINTER_SIZE)
{
// For LoongArch64's ABI, maybe there is a padding.
// e.g. `struct {float a; long b;}`
offset += TARGET_POINTER_SIZE;
}
else
#endif
{
offset += genTypeSize(type[inx]);
}
}
}
// Are we passing a GT_OBJ struct?
//
else if (argValue->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argValue->AsObj();
GenTree* baseAddr = argObj->gtOp1;
var_types addrType = baseAddr->TypeGet();
if (baseAddr->OperGet() == GT_ADDR)
{
GenTree* addrTaken = baseAddr->AsOp()->gtOp1;
if (addrTaken->IsLocal())
{
GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
                    // We access a non-struct type (for example, long) as a struct type.
                    // Make sure the lclVar lives on the stack so that its fields are accessible by address.
lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
// Create a new tree for 'arg'
// replace the existing LDOBJ(EXPR)
// with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...)
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
unsigned offset = 0;
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* curAddr = baseAddr;
if (offset != 0)
{
GenTree* baseAddrDup = gtCloneExpr(baseAddr);
noway_assert(baseAddrDup != nullptr);
curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL));
}
else
{
curAddr = baseAddr;
}
GenTree* curItem = gtNewIndir(type[inx], curAddr);
// For safety all GT_IND should have at least GT_GLOB_REF set.
curItem->gtFlags |= GTF_GLOB_REF;
newArg->AddField(this, curItem, offset, type[inx]);
#ifdef TARGET_LOONGARCH64
if (structSize > TARGET_POINTER_SIZE)
{
// For LoongArch64's ABI, maybe there is a padding.
// e.g. `struct {float a; long b;}`
offset += TARGET_POINTER_SIZE;
}
else
#endif
{
offset += genTypeSize(type[inx]);
}
}
}
}
#ifdef DEBUG
// If we reach here we should have set newArg to something
if (newArg == nullptr)
{
gtDispTree(argValue);
assert(!"Missing case in fgMorphMultiregStructArg");
}
#endif
noway_assert(newArg != nullptr);
#ifdef DEBUG
if (verbose)
{
printf("fgMorphMultiregStructArg created tree:\n");
gtDispTree(newArg);
}
#endif
arg = newArg; // consider calling fgMorphTree(newArg);
#endif // FEATURE_MULTIREG_ARGS
return arg;
}
//------------------------------------------------------------------------
// fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields
//
// Arguments:
// lcl - The GT_LCL_VAR node we will transform
//
// Return value:
// The new GT_FIELD_LIST that we have created.
//
GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl)
{
LclVarDsc* varDsc = lvaGetDesc(lcl);
assert(varDsc->lvPromoted);
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclNum = varDsc->lvFieldLclStart;
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
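    // Add one field entry per promoted field local, at that field's offset within the struct.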
for (unsigned i = 0; i < fieldCount; i++)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
fieldLclNum++;
}
return fieldList;
}
//------------------------------------------------------------------------
// fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary,
// to pass to a callee.
//
// Arguments:
// call - call being processed
// args - args for the call
// copyBlkClass - class handle for the struct
//
// The arg is updated if necessary with the copy.
//
void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass)
{
GenTree* argx = args->GetNode();
noway_assert(argx->gtOper != GT_MKREFANY);
fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx);
// If we're optimizing, see if we can avoid making a copy.
//
// We don't need a copy if this is the last use of an implicit by-ref local.
//
if (opts.OptimizationEnabled())
{
GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
const unsigned varNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(varNum);
const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
// We don't have liveness so we rely on other indications of last use.
//
// We handle these cases:
//
// * (must not copy) If the call is a tail call, the use is a last use.
// We must skip the copy if we have a fast tail call.
//
// * (may not copy) if the call is noreturn, the use is a last use.
// We also check for just one reference here as we are not doing
// alias analysis of the call's parameters, or checking if the call
// site is not within some try region.
//
// * (may not copy) if there is exactly one use of the local in the method,
// and the call is not in loop, this is a last use.
//
// fgMightHaveLoop() is expensive; check it last, only if necessary.
//
if (call->IsTailCall() || //
((totalAppearances == 1) && call->IsNoReturn()) || //
((totalAppearances == 1) && !fgMightHaveLoop()))
{
args->SetNode(lcl);
assert(argEntry->GetNode() == lcl);
JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum);
return;
}
}
}
JITDUMP("making an outgoing copy for struct arg\n");
if (fgOutgoingArgTemps == nullptr)
{
fgOutgoingArgTemps = hashBv::Create(this);
}
unsigned tmp = 0;
bool found = false;
// Attempt to find a local we have already used for an outgoing struct and reuse it.
// We do not reuse within a statement.
if (!opts.MinOpts())
{
indexType lclNum;
FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps)
{
LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum);
if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) &&
!fgCurrentlyInUseArgTemps->testBit(lclNum))
{
tmp = (unsigned)lclNum;
found = true;
JITDUMP("reusing outgoing struct arg");
break;
}
}
NEXT_HBV_BIT_SET;
}
// Create the CopyBlk tree and insert it.
if (!found)
{
// Get a new temp
        // Here we don't need the unsafe value class check, since the address of this temp is used only in the copyblk.
tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument"));
lvaSetStruct(tmp, copyBlkClass, false);
if (call->IsVarargs())
{
lvaSetStructUsedAsVarArg(tmp);
}
fgOutgoingArgTemps->setBit(tmp);
}
fgCurrentlyInUseArgTemps->setBit(tmp);
    // TYP_SIMD structs should not be enregistered, since the ABI requires them to be
    // allocated on the stack and their address to be passed.
if (lclVarIsSIMDType(tmp))
{
// TODO: check if we need this block here or other parts already deal with it.
lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg));
}
// Create a reference to the temp
GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType);
dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction.
// Copy the valuetype to the temp
GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */);
copyBlk = fgMorphCopyBlock(copyBlk);
#if FEATURE_FIXED_OUT_ARGS
    // Do the copy early, and evaluate the temp later (see EvalArgsToTemps).
    // When on Unix, create a LCL_FLD for structs passed in more than one register. See fgMakeTmpArgNode.
GenTree* arg = copyBlk;
#else // FEATURE_FIXED_OUT_ARGS
// Structs are always on the stack, and thus never need temps
// so we have to put the copy and temp all into one expression.
argEntry->tmpNum = tmp;
GenTree* arg = fgMakeTmpArgNode(argEntry);
// Change the expression to "(tmp=val),tmp"
arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg);
#endif // FEATURE_FIXED_OUT_ARGS
args->SetNode(arg);
call->fgArgInfo->EvalToTmp(argEntry, tmp, arg);
}
#ifdef TARGET_ARM
// See declaration for specification comment.
void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
unsigned firstArgRegNum,
regMaskTP* pArgSkippedRegMask)
{
assert(varDsc->lvPromoted);
// There's no way to do these calculations without breaking abstraction and assuming that
// integer register arguments are consecutive ints. They are on ARM.
// To start, figure out what register contains the last byte of the first argument.
LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
unsigned lastFldRegOfLastByte =
(firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
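// For example, with 4-byte ARM registers, a first field at offset 0 with size 6 has its last
// byte in register offset 1 ((0 + 6 - 1) / 4 == 1); if the next field starts at offset 12
// (register offset 3), register offset 2 is recorded as skipped by the loop below.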
// Now we're keeping track of the register that the last field ended in; see what registers
// subsequent fields start in, and whether any are skipped.
// (We assume here the invariant that the fields are sorted in offset order.)
for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++)
{
unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset;
LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum);
unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields.
// This loop enumerates the offsets of any registers that were skipped: start at the register
// after the one containing the last byte of the previous field, and stop at the first
// register of the current field.
for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset;
skippedRegOffsets++)
{
// If the register number would not be an arg reg, we're done.
if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG)
return;
*pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets));
}
lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
}
}
#endif // TARGET_ARM
/*****************************************************************************
*
* A little helper used to rearrange nested commutative operations. The
* effect is that nested associative, commutative operations are transformed
* into a 'left-deep' tree, i.e. into something like this:
*
* (((a op b) op c) op d) op...
*/
#if REARRANGE_ADDS
void Compiler::fgMoveOpsLeft(GenTree* tree)
{
GenTree* op1;
GenTree* op2;
genTreeOps oper;
do
{
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
oper = tree->OperGet();
noway_assert(GenTree::OperIsCommutative(oper));
noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL);
noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder);
noway_assert(oper == op2->gtOper);
// Commutativity doesn't hold if overflow checks are needed
if (tree->gtOverflowEx() || op2->gtOverflowEx())
{
return;
}
if (gtIsActiveCSE_Candidate(op2))
{
// If we have marked op2 as a CSE candidate,
// we can't perform a commutative reordering
// because any value numbers that we computed for op2
// will be incorrect after performing a commutative reordering
//
return;
}
if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT))
{
return;
}
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators
if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0))
{
return;
}
if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN)
{
// We could deal with this, but we were always broken and just hit the assert
// below regarding flags, which means it's not frequent, so will just bail out.
// See #195514
return;
}
noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx());
GenTree* ad1 = op2->AsOp()->gtOp1;
GenTree* ad2 = op2->AsOp()->gtOp2;
// Compiler::optOptimizeBools() can create a GT_OR of two GC pointers yielding a GT_INT
// We cannot reorder such GT_OR trees
//
if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet()))
{
break;
}
// Don't split up a byref calculation and create a new byref. E.g.,
// [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int).
// Doing this transformation could create a situation where the first
// addition (that is, [byref]+ (ref, int) ) creates a byref pointer that
// no longer points within the ref object. If a GC happens, the byref won't
// get updated. This can happen, for instance, if one of the int components
// is negative. It also requires the address generation be in a fully-interruptible
// code region.
//
if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL)
{
assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD));
break;
}
/* Change "(x op (y op z))" to "(x op y) op z" */
/* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */
GenTree* new_op1 = op2;
new_op1->AsOp()->gtOp1 = op1;
new_op1->AsOp()->gtOp2 = ad1;
/* Change the flags. */
// Make sure we aren't throwing away any flags
noway_assert((new_op1->gtFlags &
~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag.
GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated
GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0);
new_op1->gtFlags =
(new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag.
(op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT);
/* Retype new_op1 if it has become (or is no longer) a GC ptr. */
if (varTypeIsGC(op1->TypeGet()))
{
noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_ADD) || // byref(ref + (int+int))
(varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_OR)); // int(gcref | int(gcref|intval))
new_op1->gtType = tree->gtType;
}
else if (varTypeIsGC(ad2->TypeGet()))
{
// Neither ad1 nor op1 is GC, so new_op1 isn't either
noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL);
new_op1->gtType = TYP_I_IMPL;
}
// If new_op1 is a new expression, assign it a new unique value number.
// vnStore is null before the ValueNumber phase has run
if (vnStore != nullptr)
{
// We can only keep the old value number on new_op1 if both op1 and ad2
// have the same non-NoVN value numbers. Since op is commutative, comparing
// only ad2 and op1 is enough.
if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal()))
{
new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet()));
}
}
tree->AsOp()->gtOp1 = new_op1;
tree->AsOp()->gtOp2 = ad2;
/* If 'new_op1' is now the same nested op, process it recursively */
if ((ad1->gtOper == oper) && !ad1->gtOverflowEx())
{
fgMoveOpsLeft(new_op1);
}
/* If 'ad2' is now the same nested op, process it
* Instead of recursion, we set up op1 and op2 for the next loop.
*/
op1 = new_op1;
op2 = ad2;
} while ((op2->gtOper == oper) && !op2->gtOverflowEx());
return;
}
#endif
/*****************************************************************************/
void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay)
{
if (tree->OperIs(GT_BOUNDS_CHECK))
{
GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk();
BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay);
if (failBlock != nullptr)
{
boundsChk->gtIndRngFailBB = failBlock;
}
}
else if (tree->OperIs(GT_INDEX_ADDR))
{
GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr();
BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
if (failBlock != nullptr)
{
indexAddr->gtIndRngFailBB = failBlock;
}
}
else
{
noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX));
fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
}
}
BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay)
{
if (opts.MinOpts())
{
delay = false;
}
if (!opts.compDbgCode)
{
if (!delay && !compIsForInlining())
{
// Create/find the appropriate "range-fail" label
return fgRngChkTarget(compCurBB, kind);
}
}
return nullptr;
}
/*****************************************************************************
*
* Expand a GT_INDEX node and fully morph the child operands
*
 * The original GT_INDEX node is bashed into the GT_IND node that accesses
* the array element. We expand the GT_INDEX node into a larger tree that
* evaluates the array base and index. The simplest expansion is a GT_COMMA
* with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag.
* For complex array or index expressions one or more GT_COMMA assignments
* are inserted so that we only evaluate the array or index expressions once.
*
* The fully expanded tree is then morphed. This causes gtFoldExpr to
* perform local constant prop and reorder the constants in the tree and
* fold them.
*
* We then parse the resulting array element expression in order to locate
* and label the constants and variables that occur in the tree.
*/
const int MAX_ARR_COMPLEXITY = 4;
const int MAX_INDEX_COMPLEXITY = 4;
GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
{
noway_assert(tree->gtOper == GT_INDEX);
GenTreeIndex* asIndex = tree->AsIndex();
var_types elemTyp = asIndex->TypeGet();
unsigned elemSize = asIndex->gtIndElemSize;
CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass;
noway_assert(elemTyp != TYP_STRUCT || elemStructType != NO_CLASS_HANDLE);
// Fold "cns_str"[cns_index] to ushort constant
// NOTE: don't do it for an empty string; the operation will fail anyway
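// For example, "abc"[1] folds here to the integer constant 0x62 ('b').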
if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) &&
!asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32())
{
const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue());
if (cnsIndex >= 0)
{
int length;
const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd,
asIndex->Arr()->AsStrCon()->gtSconCPX, &length);
if ((cnsIndex < length) && (str != nullptr))
{
GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT);
INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return cnsCharNode;
}
}
}
#ifdef FEATURE_SIMD
if (varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize))
{
// If this is a SIMD type, this is the point at which we lose the type information,
// so we need to set the correct type on the GT_IND.
// (We don't care about the base type here, so we only check, but don't retain, the return value).
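// For example, an array of Vector4 has 16-byte struct elements, so the element access is
// retyped from TYP_STRUCT to TYP_SIMD16 below.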
unsigned simdElemSize = 0;
if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF)
{
assert(simdElemSize == elemSize);
elemTyp = getSIMDTypeForSize(elemSize);
// This is the new type of the node.
tree->gtType = elemTyp;
// Now set elemStructType to null so that we don't confuse value numbering.
elemStructType = NO_CLASS_HANDLE;
}
}
#endif // FEATURE_SIMD
// Set up the array length's offset into lenOffs
// And the first element's offset into elemOffs
ssize_t lenOffs;
uint8_t elemOffs;
if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
{
lenOffs = OFFSETOF__CORINFO_String__stringLen;
elemOffs = OFFSETOF__CORINFO_String__chars;
tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE
}
else
{
// We have a standard array
lenOffs = OFFSETOF__CORINFO_Array__length;
elemOffs = OFFSETOF__CORINFO_Array__data;
}
// In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts
// compilation time is roughly proportional to the size of the IR, this helps keep compilation times down.
// Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion
// performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in
// minopts).
//
// When we *are* optimizing, we fully expand GT_INDEX to:
// 1. Evaluate the array address expression and store the result in a temp if the expression is complex or
// side-effecting.
// 2. Evaluate the array index expression and store the result in a temp if the expression is complex or
// side-effecting.
// 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array))
// 4. Compute the address of the element that will be accessed:
// GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize)) OR
// GT_ADD(GT_ADD(array, GT_ADD(GT_MUL(index, elementSize), firstElementOffset)))
// 5. Wrap the address in a GT_ADD_ADDR (the information saved there will later be used by VN).
// 6. Dereference the address with a GT_IND.
//
// This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows
// for more straightforward bounds-check removal, CSE, etc.
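// For example, on xarch an int[] element load "a[i]" expands (before remorphing) to roughly:
//
//    COMMA(BOUNDS_CHECK(i, ARR_LENGTH(a)),
//          IND(ARR_ADDR(ADD(a, ADD(MUL(i, 4), elemOffs)))))
//
// with temps introduced for "a" and/or "i" if those expressions are complex or side-effecting.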
if (opts.MinOpts())
{
GenTree* const array = fgMorphTree(asIndex->Arr());
GenTree* const index = fgMorphTree(asIndex->Index());
GenTreeIndexAddr* const indexAddr = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize, static_cast<unsigned>(lenOffs), elemOffs);
indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT;
// Mark the indirection node as needing a range check if necessary.
// Note this will always be true unless JitSkipArrayBoundCheck() is used
if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0)
{
fgSetRngChkTarget(indexAddr);
}
if (!tree->TypeIs(TYP_STRUCT))
{
tree->ChangeOper(GT_IND);
}
else
{
DEBUG_DESTROY_NODE(tree);
tree = gtNewObjNode(elemStructType, indexAddr);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
}
GenTreeIndir* const indir = tree->AsIndir();
indir->Addr() = indexAddr;
bool canCSE = indir->CanCSE();
indir->gtFlags = indexAddr->gtFlags & GTF_ALL_EFFECT;
if (!canCSE)
{
indir->SetDoNotCSE();
}
INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return indir;
}
GenTree* arrRef = asIndex->Arr();
GenTree* index = asIndex->Index();
bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled
bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING
bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0);
GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression
GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression
GenTree* bndsChk = nullptr;
// If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address.
if (chkd)
{
GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression
GenTree* index2 = nullptr;
// If the arrRef or index expressions involves an assignment, a call, or reads from global memory,
// then we *must* allocate a temporary in which to "localize" those values, to ensure that the
// same values are used in the bounds check and the actual dereference.
// Also we allocate the temporary when the expression is sufficiently complex/expensive.
//
// Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is
// not exposed. Without that condition there are cases of local struct fields that were previously,
// needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that
// were mostly ameliorated by adding this condition.
//
// Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created
// after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is
// perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to
// do this here.
if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) ||
gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr"));
arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef);
arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
}
else
{
arrRef2 = gtCloneExpr(arrRef);
noway_assert(arrRef2 != nullptr);
}
if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) ||
index->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr"));
indexDefn = gtNewTempAssign(indexTmpNum, index);
index = gtNewLclvNode(indexTmpNum, index->TypeGet());
index2 = gtNewLclvNode(indexTmpNum, index->TypeGet());
}
else
{
index2 = gtCloneExpr(index);
noway_assert(index2 != nullptr);
}
// Next introduce a GT_BOUNDS_CHECK node
var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
// of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
// the comparison will have to be widened to 64 bits.
if (index->TypeGet() == TYP_I_IMPL)
{
bndsChkType = TYP_I_IMPL;
}
#endif // TARGET_64BIT
GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB);
if (bndsChkType != TYP_INT)
{
arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType);
}
GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL);
arrBndsChk->gtInxType = elemTyp;
bndsChk = arrBndsChk;
// Now we'll switch to using the second copies for arrRef and index
// to compute the address expression
arrRef = arrRef2;
index = index2;
}
// Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))"
GenTree* addr;
#ifdef TARGET_64BIT
// Widen 'index' on 64-bit targets
if (index->TypeGet() != TYP_I_IMPL)
{
if (index->OperGet() == GT_CNS_INT)
{
index->gtType = TYP_I_IMPL;
}
else
{
index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
/* Scale the index value if necessary */
if (elemSize > 1)
{
GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL);
// Fix 392756 WP7 Crossgen
//
// During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node
// is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar.
// Hence to prevent the constant from becoming a CSE we mark it as NO_CSE.
//
size->gtFlags |= GTF_DONT_CSE;
/* Multiply by the array element size */
addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size);
}
else
{
addr = index;
}
// Be careful to only create the byref pointer when the full index expression is added to the array reference.
// We don't want to create a partial byref address expression that doesn't include the full index offset:
// a byref must point within the containing object. It is dangerous (especially when optimizations come into
// play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
// the partial byref will not point within the object, and thus not get updated correctly during a GC.
// This is mostly a risk in fully-interruptible code regions.
// We can generate two types of trees for "addr":
//
// 1) "arrRef + (index + elemOffset)"
// 2) "(arrRef + elemOffset) + index"
//
// XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1),
// while for Arm we'd better try to make an invariant sub-tree as large as possible, which is usually
// "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen.
// 2) should still be safe from GC's point of view since both ADD operations are byref and point to
// within the object so GC will be able to correctly track and update them.
bool groupArrayRefWithElemOffset = false;
#ifdef TARGET_ARMARCH
groupArrayRefWithElemOffset = true;
// TODO: in some cases even on ARM we'd better use the 1) shape, because if "index" is invariant and "arrRef" is not
// we at least will be able to hoist/CSE "index + elemOffset" in some cases.
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497
// Use the 2) form only for primitive types for now - it significantly reduced the number of size regressions
if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp))
{
groupArrayRefWithElemOffset = false;
}
#endif
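// For example, on arm64 a 4-byte element access takes the 2) shape,
// ADD(ADD(arrRef, elemOffs), MUL(index, 4)), where the inner ADD(arrRef, elemOffs) is an
// invariant sub-tree that can then be hoisted or CSEd.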
// First element's offset
GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL);
if (groupArrayRefWithElemOffset)
{
GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr);
}
else
{
addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
}
addr = new (this, GT_ARR_ADDR) GenTreeArrAddr(addr, elemTyp, elemStructType, elemOffs);
// Change the original GT_INDEX node into a GT_IND node
tree->SetOper(GT_IND);
// If the element type is a floating-point type, notify the compiler
// that we'll potentially use floating-point registers at the time of codegen.
if (varTypeUsesFloatReg(tree->gtType))
{
this->compFloatingPointUsed = true;
}
// We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node
// is no longer a GT_INDEX node.
tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT);
tree->AsOp()->gtOp1 = addr;
// If there's a bounds check, the indir won't fault.
if (bndsChk || indexNonFaulting)
{
tree->gtFlags |= GTF_IND_NONFAULTING;
addr->gtFlags |= GTF_ARR_ADDR_NONNULL;
}
else
{
tree->gtFlags |= GTF_EXCEPT;
}
if (nCSE)
{
tree->gtFlags |= GTF_DONT_CSE;
}
// Did we create a bndsChk tree?
if (bndsChk)
{
// Use a GT_COMMA node to prepend the array bound check
//
tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree);
/* Mark the indirection node as needing a range check */
fgSetRngChkTarget(bndsChk);
}
if (indexDefn != nullptr)
{
// Use a GT_COMMA node to prepend the index assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree);
}
if (arrRefDefn != nullptr)
{
// Use a GT_COMMA node to prepend the arrRef assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree);
}
JITDUMP("fgMorphArrayIndex (before remorph):\n")
DISPTREE(tree)
tree = fgMorphTree(tree);
JITDUMP("fgMorphArrayIndex (after remorph):\n")
DISPTREE(tree)
return tree;
}
#ifdef TARGET_X86
/*****************************************************************************
*
* Wrap fixed stack arguments for varargs functions to go through varargs
* cookie to access them, except for the cookie itself.
*
* Non-x86 platforms are allowed to access all arguments directly
* so we don't need this code.
*
*/
GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs)
{
/* For the fixed stack arguments of a varargs function, we need to go
through the varargs cookies to access them, except for the
cookie itself */
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg)
{
// Create a node representing the local pointing to the base of the args
GenTree* ptrArg =
gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
gtNewIconNode(varDsc->GetStackOffset() -
codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs));
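// Roughly: the fixed argument lives at a constant delta below the varargs base-of-stack-args
// pointer; the delta is computed from the local's assigned stack offset, less the incoming
// register-argument area, adjusted by lclOffs.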
// Access the argument through the local
GenTree* tree;
if (varTypeIsStruct(varType))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
tree = gtNewObjNode(typeHnd, ptrArg);
}
else
{
tree = gtNewOperNode(GT_IND, varType, ptrArg);
}
tree->gtFlags |= GTF_IND_TGTANYWHERE;
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
return fgMorphTree(tree);
}
return NULL;
}
#endif
/*****************************************************************************
*
* Transform the given GT_LCL_VAR tree for code generation.
*/
GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph)
{
assert(tree->gtOper == GT_LCL_VAR);
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
var_types varType = lvaGetRealType(lclNum);
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0);
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
/* If not during the global morphing phase bail */
if (!fgGlobalMorph && !forceRemorph)
{
return tree;
}
bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0;
noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr
if (!varAddr && varDsc->lvNormalizeOnLoad())
{
// TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL.
// Now it does, but this leads to some regressions because we lose the uniform VNs for trees
// that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created
// here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)).
// This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer.
// This quirk preserves the previous behavior.
// TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk.
bool isBoolQuirk = varType == TYP_BOOL;
// Assertion prop can tell us to omit adding a cast here. This is
// useful when the local is a small-typed parameter that is passed in a
// register: in that case, the ABI specifies that the upper bits might
// be invalid, but the assertion guarantees us that we have normalized
// when we wrote it.
if (optLocalAssertionProp && !isBoolQuirk &&
optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX)
{
// The previous assertion can guarantee us that if this node gets
// assigned a register, it will be normalized already. It is still
// possible that this node ends up being in memory, in which case
// normalization will still be needed, so we better have the right
// type.
assert(tree->TypeGet() == varDsc->TypeGet());
return tree;
}
// Small-typed arguments and aliased locals are normalized on load.
// Other small-typed locals are normalized on store.
// Under the debugger we also normalize on load, as the debugger could write to the variable.
// If this is one of the former, insert a narrowing cast on the load.
// ie. Convert: var-short --> cast-short(var-int)
tree->gtType = TYP_INT;
fgMorphTreeDone(tree);
tree = gtNewCastNode(TYP_INT, tree, false, varType);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
/*****************************************************************************
Grab a temp for big offset morphing.
This method will grab a new temp if no temp of this "type" has been created.
Or it will return the same cached one if it has been created.
*/
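// For example, fgMorphField uses one of these shared temps to cache a (non-LCL_VAR) object
// reference when it must insert an explicit null check (see the addExplicitNullCheck path there).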
unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
{
unsigned lclNum = fgBigOffsetMorphingTemps[type];
if (lclNum == BAD_VAR_NUM)
{
// We haven't created a temp for this kind of type. Create one now.
lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing"));
fgBigOffsetMorphingTemps[type] = lclNum;
}
else
{
// We'd better get the right type.
noway_assert(lvaTable[lclNum].TypeGet() == type);
}
noway_assert(lclNum != BAD_VAR_NUM);
return lclNum;
}
/*****************************************************************************
*
* Transform the given GT_FIELD tree for code generation.
*/
GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac)
{
assert(tree->gtOper == GT_FIELD);
CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd;
unsigned fldOffset = tree->AsField()->gtFldOffset;
GenTree* objRef = tree->AsField()->GetFldObj();
bool objIsLocal = false;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField();
if (!tree->AsField()->gtFldMayOverlap)
{
if (objRef != nullptr)
{
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::Instance);
}
else
{
// Only simple statics get imported as GT_FIELDs.
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::SimpleStatic);
}
}
// Reset the flag because we may reuse the node.
tree->AsField()->gtFldMayOverlap = false;
if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR))
{
// Make sure we've checked if 'objRef' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs may change it to a different opcode, which the
// SIMD field rewrites are sensitive to.
fgMorphImplicitByRefArgs(objRef);
}
noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) ||
((tree->gtFlags & GTF_GLOB_REF) != 0));
#ifdef FEATURE_SIMD
// if this field belongs to simd struct, translate it to simd intrinsic.
if (mac == nullptr)
{
if (IsBaselineSimdIsaSupported())
{
GenTree* newTree = fgMorphFieldToSimdGetElement(tree);
if (newTree != tree)
{
newTree = fgMorphTree(newTree);
return newTree;
}
}
}
else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1()))
{
GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr();
if (lcl != nullptr)
{
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
#endif
// Create a default MorphAddrContext early so it doesn't go out of scope
// before it is used.
MorphAddrContext defMAC(MACK_Ind);
/* Is this an instance data member? */
if (objRef)
{
GenTree* addr;
objIsLocal = objRef->IsLocal();
if (tree->gtFlags & GTF_IND_TLS_REF)
{
NO_WAY("instance field can not be a TLS ref.");
}
/* We'll create the expression "*(objRef + mem_offs)" */
noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL);
/*
Now we have a tree like this:
+--------------------+
| GT_FIELD | tree
+----------+---------+
|
+--------------+-------------+
|tree->AsField()->GetFldObj()|
+--------------+-------------+
We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+---------+----------+
|
|
+---------+----------+
| GT_ADD | addr
+---------+----------+
|
/ \
/ \
/ \
+-------------------+ +----------------------+
| objRef | | fldOffset |
| | | (when fldOffset !=0) |
+-------------------+ +----------------------+
or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+----------+---------+
|
+----------+---------+
| GT_COMMA | comma2
+----------+---------+
|
/ \
/ \
/ \
/ \
+---------+----------+ +---------+----------+
comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr
+---------+----------+ +---------+----------+
| |
/ \ / \
/ \ / \
/ \ / \
+-----+-----+ +-----+-----+ +---------+ +-----------+
asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset |
+-----+-----+ +-----+-----+ +---------+ +-----------+
| |
/ \ |
/ \ |
/ \ |
+-----+-----+ +-----+-----+ +-----------+
| tmpLcl | | objRef | | tmpLcl |
+-----------+ +-----------+ +-----------+
*/
var_types objRefType = objRef->TypeGet();
GenTree* comma = nullptr;
// NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field,
// and thus is equivalent to a MACK_Ind with zero offset.
if (mac == nullptr)
{
mac = &defMAC;
}
// This flag is set to enable the "conservative" style of explicit null-check insertion.
// This means that we insert an explicit null check whenever we create byref by adding a
// constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately
// dereferenced). The alternative is "aggressive", which would not insert such checks (for
// small offsets); in this plan, we would transfer some null-checking responsibility to
// callees of methods taking byref parameters. They would have to add explicit null checks
// when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in
// contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too
// large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null
// checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs.
// This is left here to point out how to implement it.
CLANG_FORMAT_COMMENT_ANCHOR;
#define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1
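// For example, under the conservative scheme, morphing "&obj.f" (a MACK_Addr context) with f at
// a nonzero constant offset inserts an explicit NULLCHECK of obj before forming the byref
// "obj + offset", since that byref is not immediately dereferenced.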
bool addExplicitNullCheck = false;
// Implicit byref locals and string literals are never null.
if (fgAddrCouldBeNull(objRef))
{
// If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression
// whose address is being taken is either a local or static variable, whose address is necessarily
// non-null, or else it is a field dereference, which will do its own bounds checking if necessary.
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind))
{
if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset))
{
addExplicitNullCheck = true;
}
else
{
// In R2R mode the field offset for some fields may change when the code
// is loaded. So we can't rely on a zero offset here to suppress the null check.
//
// See GitHub issue #16454.
bool fieldHasChangeableOffset = false;
#ifdef FEATURE_READYTORUN
fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr);
#endif
#if CONSERVATIVE_NULL_CHECK_BYREF_CREATION
addExplicitNullCheck = (mac->m_kind == MACK_Addr) &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset);
#else
addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset));
#endif
}
}
}
if (addExplicitNullCheck)
{
#ifdef DEBUG
if (verbose)
{
printf("Before explicit null check morphing:\n");
gtDispTree(tree);
}
#endif
//
// Create the "comma" subtree
//
GenTree* asg = nullptr;
GenTree* nullchk;
unsigned lclNum;
if (objRef->gtOper != GT_LCL_VAR)
{
lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet()));
// Create the "asg" node
asg = gtNewTempAssign(lclNum, objRef);
}
else
{
lclNum = objRef->AsLclVarCommon()->GetLclNum();
}
GenTree* lclVar = gtNewLclvNode(lclNum, objRefType);
nullchk = gtNewNullCheck(lclVar, compCurBB);
nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections
if (asg)
{
// Create the "comma" node.
comma = gtNewOperNode(GT_COMMA,
TYP_VOID, // We don't want to return anything from this "comma" node.
// Set the type to TYP_VOID, so we can select "cmp" instruction
// instead of "mov" instruction later on.
asg, nullchk);
}
else
{
comma = nullchk;
}
addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
}
else
{
addr = objRef;
}
#ifdef FEATURE_READYTORUN
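// Under ReadyToRun the field's final offset may only be known when the code is loaded; in that
// case the extra offset is read from an indirection cell at runtime and added to the address.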
if (tree->AsField()->gtFieldLookup.addr != nullptr)
{
GenTree* offsetNode = nullptr;
if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE)
{
offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr,
GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd;
#endif
}
else
{
noway_assert(!"unexpected accessType for R2R field access");
}
var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF;
addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode);
}
#endif
if (fldOffset != 0)
{
// Generate the "addr" node.
// Add the member offset to the object's address.
addr = gtNewOperNode(GT_ADD, (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF, addr,
gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq));
}
// Now let's set the "tree" as a GT_IND tree.
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
tree->SetIndirExceptionFlags(this);
if (addExplicitNullCheck)
{
//
// Create "comma2" node and link it to "tree".
//
GenTree* comma2;
comma2 = gtNewOperNode(GT_COMMA,
addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node.
comma, addr);
tree->AsOp()->gtOp1 = comma2;
}
#ifdef DEBUG
if (verbose)
{
if (addExplicitNullCheck)
{
printf("After adding explicit null check:\n");
gtDispTree(tree);
}
}
#endif
}
else /* This is a static data member */
{
if (tree->gtFlags & GTF_IND_TLS_REF)
{
// Thread Local Storage static field reference
//
// Field ref is a TLS 'Thread-Local-Storage' reference
//
// Build this tree: IND(*) #
// |
// ADD(I_IMPL)
// / \.
// / CNS(fldOffset)
// /
// /
// /
// IND(I_IMPL) == [Base of this DLL's TLS]
// |
// ADD(I_IMPL)
// / \.
// / CNS(IdValue*4) or MUL
// / / \.
// IND(I_IMPL) / CNS(4)
// | /
// CNS(TLS_HDL,0x2C) IND
// |
// CNS(pIdAddr)
//
// # Denotes the original node
//
void** pIdAddr = nullptr;
unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr);
//
// If we can access the TLS DLL index ID value directly,
// then pIdAddr will be NULL and
// IdValue will be the actual TLS DLL index ID
//
GenTree* dllRef = nullptr;
if (pIdAddr == nullptr)
{
if (IdValue != 0)
{
dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL);
}
}
else
{
dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true);
// Next we multiply by 4
dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL));
}
#define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides
// Mark this ICON as a TLS_HDL, codegen will use FS:[cns]
GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS
if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
tlsRef->gtFlags |= GTF_ICON_INITCLASS;
}
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (dllRef != nullptr)
{
/* Add the dllRef */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef);
}
/* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (fldOffset != 0)
{
GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);
/* Add the TLS static field offset to the address */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode);
}
// Final indirect to get to actual value of TLS static field
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = tlsRef;
noway_assert(tree->gtFlags & GTF_IND_TLS_REF);
}
else
{
// Normal static field reference
//
// If we can access the static's address directly,
// then pFldAddr will be NULL and
// fldAddr will be the actual address of the static field
//
void** pFldAddr = nullptr;
void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr);
// We should always be able to access this static field address directly
//
assert(pFldAddr == nullptr);
// For boxed statics, this direct address will be for the box. We have already added
// the indirection for the field itself and attached the sequence, in importation.
bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd);
if (isBoxedStatic)
{
fieldSeq = FieldSeqStore::NotAField();
}
// TODO-CQ: enable this optimization for 32 bit targets.
bool isStaticReadOnlyInited = false;
#ifdef TARGET_64BIT
if (tree->TypeIs(TYP_REF) && !isBoxedStatic)
{
bool pIsSpeculative = true;
if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE)
{
isStaticReadOnlyInited = !pIsSpeculative;
}
}
#endif // TARGET_64BIT
// TODO: the choices made below are mostly for historical reasons and
// should be unified to always use the IND(<address>) form.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_64BIT) || defined(TARGET_X86)
bool preferIndir = true;
#else // !TARGET_64BIT
bool preferIndir = isBoxedStatic;
#endif // !TARGET_64BIT
if (preferIndir)
{
GenTreeFlags handleKind = GTF_EMPTY;
if (isBoxedStatic)
{
handleKind = GTF_ICON_STATIC_BOX_PTR;
}
else if (isStaticReadOnlyInited)
{
handleKind = GTF_ICON_CONST_PTR;
}
else
{
handleKind = GTF_ICON_STATIC_HDL;
}
GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fieldSeq);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to.
if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
addr->gtFlags |= GTF_ICON_INITCLASS;
}
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
if (isBoxedStatic)
{
// The box for the static cannot be null, and is logically invariant, since it
// represents (a base for) the static's address.
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
else if (isStaticReadOnlyInited)
{
JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd));
// Static readonly field is not null at this point (see getStaticFieldCurrentClass impl).
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
return fgMorphSmpOp(tree);
}
else
{
// Only volatile or classinit could be set, and they map over
noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0);
static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE);
static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS);
tree->SetOper(GT_CLS_VAR);
tree->AsClsVar()->gtClsVarHnd = symHnd;
tree->AsClsVar()->gtFieldSeq = fieldSeq;
}
return tree;
}
}
noway_assert(tree->gtOper == GT_IND);
if (fldOffset == 0)
{
GenTree* addr = tree->AsOp()->gtOp1;
// 'addr' may be a GT_COMMA. Skip over any comma nodes
addr = addr->gtEffectiveVal();
#ifdef DEBUG
if (verbose)
{
printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n");
gtDispTree(tree);
}
#endif
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node.
fgAddFieldSeqForZeroOffset(addr, fieldSeq);
}
// Pass down the current mac; if non null we are computing an address
GenTree* result = fgMorphSmpOp(tree, mac);
#ifdef DEBUG
if (verbose)
{
printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n");
gtDispTree(result);
}
#endif
return result;
}
//------------------------------------------------------------------------------
// fgMorphCallInline: attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// inlineResult - result tracking and reporting
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult)
{
bool inliningFailed = false;
// Is this call an inline candidate?
if (call->IsInlineCandidate())
{
InlineContext* createdContext = nullptr;
// Attempt the inline
fgMorphCallInlineHelper(call, inlineResult, &createdContext);
// We should have made up our minds one way or another....
assert(inlineResult->IsDecided());
// If we failed to inline, we have a bit of work to do to cleanup
if (inlineResult->IsFailure())
{
if (createdContext != nullptr)
{
// We created a context before we got to the failure, so mark
// it as failed in the tree.
createdContext->SetFailed(inlineResult);
}
else
{
#ifdef DEBUG
// In debug we always put all inline attempts into the inline tree.
InlineContext* ctx =
m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call);
ctx->SetFailed(inlineResult);
#endif
}
inliningFailed = true;
// Clear the Inline Candidate flag so we can ensure later we tried
// inlining all candidates.
//
call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE;
}
}
else
{
// This wasn't an inline candidate. So it must be a GDV candidate.
assert(call->IsGuardedDevirtualizationCandidate());
// We already know we can't inline this call, so don't even bother to try.
inliningFailed = true;
}
// If we failed to inline (or didn't even try), do some cleanup.
if (inliningFailed)
{
if (call->gtReturnType != TYP_VOID)
{
JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID());
// Detach the GT_CALL tree from the original statement by
// hanging a "nothing" node to it. Later the "nothing" node will be removed
// and the original GT_CALL tree will be picked up by the GT_RET_EXPR node.
noway_assert(fgMorphStmt->GetRootNode() == call);
fgMorphStmt->SetRootNode(gtNewNothingNode());
}
}
}
//------------------------------------------------------------------------------
// fgMorphCallInlineHelper: Helper to attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// result - result to set to success or failure
// createdContext - The context that was created if the inline attempt got to the inliner.
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
//
// If a context was created because we got to the importer then it is output by this function.
// If the inline succeeded, this context will already be marked as successful. If it failed and
// a context is returned, then it will not have been marked as success or failed.
void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext)
{
// Don't expect any surprises here.
assert(result->IsCandidate());
if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING)
{
// For now, attributing this to call site, though it's really
// more of a budget issue (lvaCount currently includes all
// caller and prospective callee locals). We still might be
// able to inline other callees into this caller, or inline
// this callee in other callers.
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS);
return;
}
if (call->IsVirtual())
{
result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL);
return;
}
// Re-check this because guarded devirtualization may allow these through.
if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
{
result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
return;
}
// impMarkInlineCandidate() is expected not to mark tail prefixed calls
// and recursive tail calls as inline candidates.
noway_assert(!call->IsTailPrefixedCall());
noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call));
//
// Calling inlinee's compiler to inline the method.
//
unsigned startVars = lvaCount;
#ifdef DEBUG
if (verbose)
{
printf("Expanding INLINE_CANDIDATE in statement ");
printStmtID(fgMorphStmt);
printf(" in " FMT_BB ":\n", compCurBB->bbNum);
gtDispStmt(fgMorphStmt);
if (call->IsImplicitTailCall())
{
printf("Note: candidate is implicit tail call\n");
}
}
#endif
impInlineRoot()->m_inlineStrategy->NoteAttempt(result);
//
// Invoke the compiler to inline the call.
//
fgInvokeInlineeCompiler(call, result, createdContext);
if (result->IsFailure())
{
// Undo some changes made in anticipation of inlining...
// Zero out the used locals
memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable));
for (unsigned i = startVars; i < lvaCount; i++)
{
new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor.
}
lvaCount = startVars;
#ifdef DEBUG
if (verbose)
{
// printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount);
}
#endif
return;
}
#ifdef DEBUG
if (verbose)
{
// printf("After inlining lvaCount=%d.\n", lvaCount);
}
#endif
}
//------------------------------------------------------------------------
// fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp.
//
// Arguments:
// callee - The callee to check
// failReason - If this method returns false, the reason why. Can be nullptr.
//
// Return Value:
// Returns true or false based on whether the callee can be fastTailCalled
//
// Notes:
// This function is target specific and each target will make the fastTailCall
// decision differently. See the notes below.
//
// This function calls fgInitArgInfo() to initialize the arg info table, which
// is used to analyze the argument. This function can alter the call arguments
// by adding argument IR nodes for non-standard arguments.
//
// Windows Amd64:
// A fast tail call can be made whenever the number of callee arguments
// is less than or equal to the number of caller arguments, or we have four
// or fewer callee arguments. This is because, on Windows AMD64, each
// argument uses exactly one register or one 8-byte stack slot. Thus, we only
// need to count arguments, and not be concerned with the size of each
// incoming or outgoing argument.
//
// Can fast tail call examples (amd64 Windows):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal or less than the caller --
// caller(struct, struct, struct, struct, struct, struct)
// callee(int, int, int, int, int, int)
//
// -- Callee requires stack space that is less than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int)
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Windows):
//
// -- Callee requires stack space that is larger than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int, double, double, double)
//
// -- Callee has a byref struct argument --
// caller(int, int, int)
// callee(struct(size 3 bytes))
//
// Unix Amd64 && Arm64:
// A fastTailCall decision can be made whenever the callee's stack space is
// less than or equal to the caller's stack space. There are many permutations
// of when the caller and callee have different stack sizes if there are
// structs being passed to either the caller or callee.
//
// Exceptions:
// If the callee has a 9 to 16 byte struct argument and the callee has
// stack arguments, the decision will be to not fast tail call. This is
// because before fgMorphArgs is done, it is unknown whether the struct
// will be placed on the stack or enregistered. Therefore, the conservative
// decision not to fast tail call is taken. This limitation should be
// removed if/when fgMorphArgs no longer depends on fgCanFastTailCall.
//
// Can fast tail call examples (amd64 Unix):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal to the caller --
// caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte
// stack
// space
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee requires stack space that is less than the caller --
// caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte
// stack
// space
// callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Unix):
//
// -- Callee requires stack space that is larger than the caller --
// caller(float, float, float, float, float, float, float, float) -- 8 float register arguments
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee has structs which cannot be enregistered (Implementation Limitation) --
// caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register
// arguments, 24 byte stack space
// callee({ double, double, double }) -- 24 bytes stack space
//
// -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) --
// caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space
// callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space
//
// -- Caller requires stack space and nCalleeArgs > nCallerArgs (Bug) --
// caller({ double, double, double, double, double, double }) // 48 byte stack
// callee(int, int) -- 2 int registers
bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
{
#if FEATURE_FASTTAILCALL
// To reach here means that the return types of the caller and callee are tail call compatible.
// In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (callee->IsTailPrefixedCall())
{
var_types retType = info.compRetType;
assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv,
(var_types)callee->gtReturnType, callee->gtRetClsHnd,
callee->GetUnmanagedCallConv()));
}
#endif
assert(!callee->AreArgsComplete());
fgInitArgInfo(callee);
fgArgInfo* argInfo = callee->fgArgInfo;
unsigned calleeArgStackSize = 0;
unsigned callerArgStackSize = info.compArgStackSize;
auto reportFastTailCallDecision = [&](const char* thisFailReason) {
if (failReason != nullptr)
{
*failReason = thisFailReason;
}
#ifdef DEBUG
if ((JitConfig.JitReportFastTailCallDecisions()) == 1)
{
if (callee->gtCallType != CT_INDIRECT)
{
const char* methodName;
methodName = eeGetMethodFullName(callee->gtCallMethHnd);
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ",
info.compFullName, methodName);
}
else
{
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- "
"Decision: ",
info.compFullName);
}
if (thisFailReason == nullptr)
{
printf("Will fast tailcall");
}
else
{
printf("Will not fast tailcall (%s)", thisFailReason);
}
printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize);
}
else
{
if (thisFailReason == nullptr)
{
JITDUMP("[Fast tailcall decision]: Will fast tailcall\n");
}
else
{
JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason);
}
}
#endif // DEBUG
};
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment());
calleeArgStackSize += arg->GetStackByteSize();
#ifdef TARGET_ARM
if (arg->IsSplit())
{
reportFastTailCallDecision("Splitted argument in callee is not supported on ARM32");
return false;
}
#endif // TARGET_ARM
}
calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize);
#ifdef TARGET_ARM
if (compHasSplitParam)
{
reportFastTailCallDecision("Splitted argument in caller is not supported on ARM32");
return false;
}
if (compIsProfilerHookNeeded())
{
reportFastTailCallDecision("Profiler is not supported on ARM32");
return false;
}
// On ARM32 we have only one non-parameter volatile register and we need it
// for the GS security cookie check. We could technically still tailcall
// when the callee does not use all argument registers, but we keep the
// code simple here.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("Not enough registers available due to the GS security cookie check");
return false;
}
#endif
if (!opts.compFastTailCalls)
{
reportFastTailCallDecision("Configuration doesn't allow fast tail calls");
return false;
}
if (callee->IsStressTailCall())
{
reportFastTailCallDecision("Fast tail calls are not performed under tail call stress");
return false;
}
#ifdef TARGET_ARM
if (callee->IsR2RRelativeIndir() || callee->HasNonStandardAddedArgs(this))
{
reportFastTailCallDecision(
"Method with non-standard args passed in callee saved register cannot be tail called");
return false;
}
#endif
// Note on vararg methods:
// If the caller is vararg method, we don't know the number of arguments passed by caller's caller.
// But we can be sure that in-coming arg area of vararg caller would be sufficient to hold its
// fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as
// out-going area required for callee is bounded by caller's fixed argument space.
//
// Note that the callee being a vararg method is not a problem since we can account for the params being passed.
//
// We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg
// method. This is due to the ABI differences for native vararg methods for these platforms. There is
// work required to shuffle arguments to the correct locations.
CLANG_FORMAT_COMMENT_ANCHOR;
if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs()))
{
reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64");
return false;
}
if (compLocallocUsed)
{
reportFastTailCallDecision("Localloc used");
return false;
}
#ifdef TARGET_AMD64
// Needed for Jit64 compat.
// In future, enabling fast tail calls from methods that need GS cookie
// check would require codegen side work to emit GS cookie check before a
// tail call.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("GS Security cookie check required");
return false;
}
#endif
// If the NextCallReturnAddress intrinsic is used we should do normal calls.
if (info.compHasNextCallRetAddr)
{
reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic");
return false;
}
if (callee->HasRetBufArg()) // RetBuf
{
// If callee has RetBuf param, caller too must have it.
// Otherwise go the slow route.
if (info.compRetBuffArg == BAD_VAR_NUM)
{
reportFastTailCallDecision("Callee has RetBuf but caller does not.");
return false;
}
}
// For a fast tail call the caller will use its incoming arg stack space to place
// arguments, so if the callee requires more arg stack space than is available here
// the fast tail call cannot be performed. This is common to all platforms.
// Note that the GC'ness of on-stack args need not match since the arg setup area is marked
// as non-interruptible for fast tail calls.
if (calleeArgStackSize > callerArgStackSize)
{
reportFastTailCallDecision("Not enough incoming arg space");
return false;
}
// On Windows some struct parameters are copied on the local frame
// and then passed by reference. We cannot fast tail call in these situations
// as we need to keep our frame around.
if (fgCallHasMustCopyByrefParameter(callee))
{
reportFastTailCallDecision("Callee has a byref parameter");
return false;
}
reportFastTailCallDecision(nullptr);
return true;
#else // FEATURE_FASTTAILCALL
if (failReason)
*failReason = "Fast tailcalls are not supported on this platform";
return false;
#endif
}
//------------------------------------------------------------------------
// fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that
// requires a struct copy in the caller.
//
// Arguments:
// callee - The callee to check
//
// Return Value:
// Returns true or false based on whether this call has a byref parameter that
// requires a struct copy in the caller.
#if FEATURE_FASTTAILCALL
bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee)
{
fgArgInfo* argInfo = callee->fgArgInfo;
bool hasMustCopyByrefParameter = false;
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
if (arg->isStruct)
{
if (arg->passedByRef)
{
// Generally a byref arg will block tail calling, as we have to
// make a local copy of the struct for the callee.
hasMustCopyByrefParameter = true;
// If we're optimizing, we may be able to pass our caller's byref to our callee,
// and so still be able to avoid a struct copy.
if (opts.OptimizationEnabled())
{
// First, see if this arg is an implicit byref param.
GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
// Yes, the arg is an implicit byref param.
const unsigned lclNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(lcl);
// The param must not be promoted; if we've promoted, then the arg will be
// a local struct assembled from the promoted fields.
if (varDsc->lvPromoted)
{
JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n",
dspTreeID(arg->GetNode()), lclNum);
}
else
{
JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n",
dspTreeID(arg->GetNode()), lclNum);
// We have to worry about introducing aliases if we bypass copying
// the struct at the call. We'll do some limited analysis to see if we
// can rule this out.
const unsigned argLimit = 6;
// If this is the only appearance of the byref in the method, then
// aliasing is not possible.
//
// If no other call arg refers to this byref, and no other arg is
// a pointer which could refer to this byref, we can optimize.
//
// We only check this for calls with small numbers of arguments,
// as the analysis cost will be quadratic.
//
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
if (totalAppearances == 1)
{
JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
else if (totalAppearances > callAppearances)
{
// lvRefCntWtd tracks the number of appearances of the arg at call sites.
// If this number doesn't match the regular ref count, there is
// a non-call appearance, and we must be conservative.
//
JITDUMP("... no, arg has %u non-call appearance(s)\n",
totalAppearances - callAppearances);
}
else if (argInfo->ArgCount() <= argLimit)
{
JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n"
"... Running alias analysis on this call's args\n",
totalAppearances);
GenTree* interferingArg = nullptr;
for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2)
{
if (index2 == index)
{
continue;
}
fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false);
JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode()));
DISPTREE(arg2->GetNode());
// Do we pass 'lcl' more than once to the callee?
if (arg2->isStruct && arg2->passedByRef)
{
GenTreeLclVarCommon* const lcl2 =
arg2->GetNode()->IsImplicitByrefParameterValue(this);
if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum()))
{
// not copying would introduce aliased implicit byref structs
// in the callee ... we can't optimize.
interferingArg = arg2->GetNode();
break;
}
else
{
JITDUMP("... arg refers to different implicit byref V%02u\n",
lcl2->GetLclNum());
continue;
}
}
// Do we pass a byref pointer which might point within 'lcl'?
//
// We can assume the 'lcl' is unaliased on entry to the
// method, so the only way we can have an aliasing byref pointer at
// the call is if 'lcl' is address taken/exposed in the method.
//
// Note even though 'lcl' is not promoted, we are in the middle
// of the promote->rewrite->undo->(morph)->demote cycle, and so
// might see references to promoted fields of 'lcl' that haven't yet
// been demoted (see fgMarkDemotedImplicitByRefArgs).
//
// So, we also need to scan all 'lcl's fields, if any, to see if they
// are exposed.
//
// When looking for aliases from other args, we check for both TYP_BYREF
// and TYP_I_IMPL typed args here. Conceptually anything that points into
// an implicit byref parameter should be TYP_BYREF, as these parameters could
// refer to boxed heap locations (say if the method is invoked by reflection)
// but there are some stack only structs (like typed references) where
// the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will
// transiently retype all simple address-of implicit parameter args as
// TYP_I_IMPL.
//
if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL))
{
JITDUMP("...arg is a byref, must run an alias check\n");
bool checkExposure = true;
bool hasExposure = false;
// See if there is any way arg could refer to a parameter struct.
GenTree* arg2Node = arg2->GetNode();
if (arg2Node->OperIs(GT_LCL_VAR))
{
GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon();
assert(arg2LclNode->GetLclNum() != lclNum);
LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode);
// Other params can't alias implicit byref params
if (arg2Dsc->lvIsParam)
{
checkExposure = false;
}
}
// Because we're checking TYP_I_IMPL above, at least
// screen out obvious things that can't cause aliases.
else if (arg2Node->IsIntegralConst())
{
checkExposure = false;
}
if (checkExposure)
{
JITDUMP(
"... not sure where byref arg points, checking if V%02u is exposed\n",
lclNum);
// arg2 might alias arg, see if we've exposed
// arg somewhere in the method.
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
// Struct as a whole is exposed, can't optimize
JITDUMP("... V%02u is exposed\n", lclNum);
hasExposure = true;
}
else if (varDsc->lvFieldLclStart != 0)
{
// This is the promoted/undone struct case.
//
// The field start is actually the local number of the promoted local,
// use it to enumerate the fields.
const unsigned promotedLcl = varDsc->lvFieldLclStart;
LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl);
JITDUMP("...promoted-unpromoted case -- also checking exposure of "
"fields of V%02u\n",
promotedLcl);
for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt;
fieldIndex++)
{
LclVarDsc* fieldDsc =
lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex);
if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed())
{
// Promoted and not yet demoted field is exposed, can't optimize
JITDUMP("... field V%02u is exposed\n",
promotedVarDsc->lvFieldLclStart + fieldIndex);
hasExposure = true;
break;
}
}
}
}
if (hasExposure)
{
interferingArg = arg2->GetNode();
break;
}
}
else
{
JITDUMP("...arg is not a byref or implicit byref (%s)\n",
varTypeName(arg2->GetNode()->TypeGet()));
}
}
if (interferingArg != nullptr)
{
JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg),
lclNum);
}
else
{
JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
}
else
{
JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n",
argInfo->ArgCount(), argLimit);
}
}
}
}
if (hasMustCopyByrefParameter)
{
// This arg requires a struct copy. No reason to keep scanning the remaining args.
break;
}
}
}
}
return hasMustCopyByrefParameter;
}
#endif
//------------------------------------------------------------------------
// fgMorphPotentialTailCall: Attempt to morph a call that the importer has
// identified as a potential tailcall to an actual tailcall and return the
// placeholder node to use in this case.
//
// Arguments:
// call - The call to morph.
//
// Return Value:
// Returns a node to use if the call was morphed into a tailcall. If this
// function returns a node the call is done being morphed and the new node
// should be used. Otherwise the call will have been demoted to a regular call
// and should go through normal morph.
//
// Notes:
// This is called only for calls that the importer has already identified as
// potential tailcalls. It will do profitability and legality checks and
// classify which kind of tailcall we are able to (or should) do, along with
// modifying the trees to perform that kind of tailcall.
//
GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
// It should either be an explicit (i.e. tail prefixed) or an implicit tail call
assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall());
// It cannot be an inline candidate
assert(!call->IsInlineCandidate());
auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) {
#ifdef DEBUG
if (verbose)
{
printf("\nRejecting tail call in morph for call ");
printTreeID(call);
printf(": %s", reason);
if (lclNum != BAD_VAR_NUM)
{
printf(" V%02u", lclNum);
}
printf("\n");
}
#endif
// for non user funcs, we have no handles to report
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), TAILCALL_FAIL, reason);
// We have checked the candidate so demote.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
};
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
failTailCall("Might turn into an intrinsic");
return nullptr;
}
#ifdef TARGET_ARM
if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
failTailCall("Non-standard calling convention");
return nullptr;
}
#endif
if (call->IsNoReturn() && !call->IsTailPrefixedCall())
{
// Such tail calls always throw an exception and we won't be able to see the current
// Caller() in the stacktrace.
failTailCall("Never returns");
return nullptr;
}
#ifdef DEBUG
if (opts.compGcChecks && (info.compRetType == TYP_REF))
{
failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, "
"invalidating tailcall opportunity");
return nullptr;
}
#endif
// We have to ensure to pass the incoming retValBuf as the
// outgoing one. Using a temp will not do as this function will
// not regain control to do the copy. This can happen when inlining
// a tailcall which also has a potential tailcall in it: the IL looks
// like we can do a tailcall, but the trees generated use a temp for the inlinee's
// result. TODO-CQ: Fix this.
if (info.compRetBuffArg != BAD_VAR_NUM)
{
noway_assert(call->TypeGet() == TYP_VOID);
GenTree* retValBuf = call->gtCallArgs->GetNode();
if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg)
{
failTailCall("Need to copy return buffer");
return nullptr;
}
}
// We are still not sure whether it can be a tail call. Because, when converting
// a call to an implicit tail call, we must check that there are no locals with
// their address taken. If this is the case, we have to assume that the address
// has been leaked and the current stack frame must live until after the final
// call.
// Verify that no var has the lvHasLdAddrOp or IsAddressExposed() bit set. Note
// that lvHasLdAddrOp is much more conservative. We cannot just base it on
// IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs
// during the morph stage. The reason for also checking IsAddressExposed() is that in
// the case of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp.
// The combination of lvHasLdAddrOp and IsAddressExposed(), though conservative, ensures
// we are never incorrect.
//
// TODO-Throughput: have a compiler level flag to indicate whether method has vars whose
// address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed()
// is set. This avoids the need for iterating through all lcl vars of the current
// method. Right now throughout the code base we are not consistently using 'set'
// method to set lvHasLdAddrOp and IsAddressExposed() flags.
bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall();
if (isImplicitOrStressTailCall && compLocallocUsed)
{
failTailCall("Localloc used");
return nullptr;
}
bool hasStructParam = false;
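// Walk all locals: for implicit or stress tail calls, an address-taken local, a promoted struct
// parameter, or a pinned local blocks the tail call; independently, any struct parameter prevents
// turning a recursive tail call into a loop (see the use of hasStructParam below).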
for (unsigned varNum = 0; varNum < lvaCount; varNum++)
{
LclVarDsc* varDsc = lvaGetDesc(varNum);
// If the method is marked as an explicit tail call we will skip the
// following three hazard checks.
// We still must check for any struct parameters and set 'hasStructParam'
// so that we won't transform the recursive tail call into a loop.
//
if (isImplicitOrStressTailCall)
{
if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Local address taken", varNum);
return nullptr;
}
if (varDsc->IsAddressExposed())
{
if (lvaIsImplicitByRefLocal(varNum))
{
// The address of the implicit-byref is a non-address use of the pointer parameter.
}
else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl))
{
// The address of the implicit-byref's field is likewise a non-address use of the pointer
// parameter.
}
else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum))
{
// This temp was used for struct promotion bookkeeping. It will not be used, and will have
// its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs.
assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl));
assert(fgGlobalMorph);
}
else
{
failTailCall("Local address taken", varNum);
return nullptr;
}
}
if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Has Struct Promoted Param", varNum);
return nullptr;
}
if (varDsc->lvPinned)
{
// A tail call removes the method from the stack, which means the pinning
// goes away for the callee. We can't allow that.
failTailCall("Has Pinned Vars", varNum);
return nullptr;
}
}
if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam)
{
hasStructParam = true;
// This prevents transforming a recursive tail call into a loop
// but doesn't prevent tail call optimization so we need to
// look at the rest of parameters.
}
}
if (!fgCheckStmtAfterTailCall())
{
failTailCall("Unexpected statements after the tail call");
return nullptr;
}
const char* failReason = nullptr;
bool canFastTailCall = fgCanFastTailCall(call, &failReason);
CORINFO_TAILCALL_HELPERS tailCallHelpers;
bool tailCallViaJitHelper = false;
if (!canFastTailCall)
{
if (call->IsImplicitTailCall())
{
// Implicit or opportunistic tail calls are always dispatched via fast tail call
// mechanism and never via tail call helper for perf.
failTailCall(failReason);
return nullptr;
}
assert(call->IsTailPrefixedCall());
assert(call->tailCallInfo != nullptr);
// We do not currently handle non-standard args except for VSD stubs.
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this))
{
failTailCall(
"Method with non-standard args passed in callee trash register cannot be tail called via helper");
return nullptr;
}
// On x86 we have a faster mechanism than the general one which we use
// in almost all cases. See fgCanTailCallViaJitHelper for more information.
if (fgCanTailCallViaJitHelper())
{
tailCallViaJitHelper = true;
}
else
{
// Make sure we can get the helpers. We do this last as the runtime
// will likely be required to generate these.
CORINFO_RESOLVED_TOKEN* token = nullptr;
CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig();
unsigned flags = 0;
if (!call->tailCallInfo->IsCalli())
{
token = call->tailCallInfo->GetToken();
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_TAILCALL_IS_CALLVIRT;
}
}
if (call->gtCallThisArg != nullptr)
{
var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet();
if (thisArgType != TYP_REF)
{
flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF;
}
}
if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags,
&tailCallHelpers))
{
failTailCall("Tail call help not available");
return nullptr;
}
}
}
// Check if we can make the tailcall a loop.
bool fastTailCallToLoop = false;
#if FEATURE_TAILCALL_OPT
// TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register
// or return type is a struct that can be passed in a register.
//
// TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through
// hidden generic context param or through keep alive thisptr), then while transforming a recursive
// call to such a method requires that the generic context stored on stack slot be updated. Right now,
// fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming
// a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the
// generic type parameters of both caller and callee generic method are the same.
if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() &&
!lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet()))
{
fastTailCallToLoop = true;
}
#endif
// Ok -- now we are committed to performing a tailcall. Report the decision.
CorInfoTailCall tailCallResult;
if (fastTailCallToLoop)
{
tailCallResult = TAILCALL_RECURSIVE;
}
else if (canFastTailCall)
{
tailCallResult = TAILCALL_OPTIMIZED;
}
else
{
tailCallResult = TAILCALL_HELPER;
}
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), tailCallResult, nullptr);
// Are we currently planning to expand the gtControlExpr as an early virtual call target?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
// It isn't always profitable to expand a virtual call early
//
// We always expand the TAILCALL_HELPER type late.
// And we expand late when we have an optimized tail call
// and the this pointer needs to be evaluated into a temp.
//
if (tailCallResult == TAILCALL_HELPER)
{
// We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work
// for us to be able to expand this earlier in morph)
//
call->ClearExpandedEarly();
}
else if ((tailCallResult == TAILCALL_OPTIMIZED) &&
((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0))
{
// We generate better code when we expand this late in lower instead.
//
call->ClearExpandedEarly();
}
}
// Now actually morph the call.
compTailCallUsed = true;
// This will prevent inlining this call.
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL;
if (tailCallViaJitHelper)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER;
}
#if FEATURE_TAILCALL_OPT
if (fastTailCallToLoop)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP;
}
#endif
// Mark that this is no longer a pending tailcall. We need to do this before
// we call fgMorphCall again (which happens in the fast tailcall case) to
// avoid recursing back into this method.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
#ifdef DEBUG
if (verbose)
{
printf("\nGTF_CALL_M_TAILCALL bit set for call ");
printTreeID(call);
printf("\n");
if (fastTailCallToLoop)
{
printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call ");
printTreeID(call);
printf("\n");
}
}
#endif
// For R2R we might need a different entry point for this call if we are doing a tailcall.
// The reason is that the normal delay load helper uses the return address to find the indirection
// cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM:
// We optimize delegate invocations manually in the JIT so skip this for those.
if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke())
{
info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint);
#ifdef TARGET_XARCH
// We have already computed arg info to make the fast tailcall decision, but on X64 we now
// have to pass the indirection cell, so redo arg info.
call->ResetArgInfo();
#endif
}
// If this block has a flow successor, make suitable updates.
//
BasicBlock* const nextBlock = compCurBB->GetUniqueSucc();
if (nextBlock == nullptr)
{
// No unique successor. compCurBB should be a return.
//
assert(compCurBB->bbJumpKind == BBJ_RETURN);
}
else
{
// Flow no longer reaches nextBlock from here.
//
fgRemoveRefPred(nextBlock, compCurBB);
// Adjust profile weights.
//
// Note if this is a tail call to loop, further updates
// are needed once we install the loop edge.
//
if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight())
{
// Since we have linear flow we can update the next block weight.
//
weight_t const blockWeight = compCurBB->bbWeight;
weight_t const nextWeight = nextBlock->bbWeight;
weight_t const newNextWeight = nextWeight - blockWeight;
// If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextWeight >= 0)
{
// Note if we'd already morphed the IR in nextblock we might
// have done something profile sensitive that we should arguably reconsider.
//
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum,
nextWeight, newNextWeight);
nextBlock->setBBProfileWeight(newNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight);
}
// If nextBlock is not a BBJ_RETURN, it should have a unique successor that
// is a BBJ_RETURN, as we allow a little bit of flow after a tail call.
//
if (nextBlock->bbJumpKind != BBJ_RETURN)
{
BasicBlock* retBlock = nextBlock->GetUniqueSucc();
// Check if we have a sequence of GT_ASG blocks where the same variable is assigned
// to temp locals over and over.
// Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs.
//
// { GT_ASG(t_0, GT_CALL(...)) }
// { GT_ASG(t_1, t0) } (with casts on rhs potentially)
// ...
// { GT_ASG(t_n, t_(n - 1)) }
// { GT_RET t_n }
//
if (retBlock->bbJumpKind != BBJ_RETURN)
{
// Make sure the block has a single statement
assert(nextBlock->firstStmt() == nextBlock->lastStmt());
// And the root node is "ASG(LCL_VAR, LCL_VAR)"
GenTree* asgNode = nextBlock->firstStmt()->GetRootNode();
assert(asgNode->OperIs(GT_ASG));
unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
while (retBlock->bbJumpKind != BBJ_RETURN)
{
#ifdef DEBUG
Statement* nonEmptyStmt = nullptr;
for (Statement* const stmt : retBlock->Statements())
{
// Ignore NOP statements
if (!stmt->GetRootNode()->OperIs(GT_NOP))
{
// Only a single non-NOP statement is allowed
assert(nonEmptyStmt == nullptr);
nonEmptyStmt = stmt;
}
}
if (nonEmptyStmt != nullptr)
{
asgNode = nonEmptyStmt->GetRootNode();
if (!asgNode->OperIs(GT_NOP))
{
assert(asgNode->OperIs(GT_ASG));
GenTree* rhs = asgNode->gtGetOp2();
while (rhs->OperIs(GT_CAST))
{
assert(!rhs->gtOverflow());
rhs = rhs->gtGetOp1();
}
assert(lcl == rhs->AsLclVarCommon()->GetLclNum());
lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
}
}
#endif
retBlock = retBlock->GetUniqueSucc();
}
}
assert(retBlock->bbJumpKind == BBJ_RETURN);
if (retBlock->hasProfileWeight())
{
// Do similar updates here.
//
weight_t const nextNextWeight = retBlock->bbWeight;
weight_t const newNextNextWeight = nextNextWeight - blockWeight;
// If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextNextWeight >= 0)
{
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, newNextNextWeight);
retBlock->setBBProfileWeight(newNextNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight);
}
}
}
}
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// We enable shared-ret tail call optimization for recursive calls even if
// FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined.
if (gtIsRecursiveCall(call))
#endif
{
// Many tailcalls will have call and ret in the same block, and thus be
// BBJ_RETURN, but if the call falls through to a ret, and we are doing a
// tailcall, change it here.
compCurBB->bbJumpKind = BBJ_RETURN;
}
GenTree* stmtExpr = fgMorphStmt->GetRootNode();
#ifdef DEBUG
// Tail call needs to be in one of the following IR forms
// Either a call stmt or
// GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..)))
// var = GT_CALL(..) or var = (GT_CAST(GT_CALL(..)))
// GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP)
// In the above,
// GT_CASTS may be nested.
genTreeOps stmtOper = stmtExpr->gtOper;
if (stmtOper == GT_CALL)
{
assert(stmtExpr == call);
}
else
{
assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA);
GenTree* treeWithCall;
if (stmtOper == GT_RETURN)
{
treeWithCall = stmtExpr->gtGetOp1();
}
else if (stmtOper == GT_COMMA)
{
// Second operation must be nop.
assert(stmtExpr->gtGetOp2()->IsNothingNode());
treeWithCall = stmtExpr->gtGetOp1();
}
else
{
treeWithCall = stmtExpr->gtGetOp2();
}
// Peel off casts
while (treeWithCall->gtOper == GT_CAST)
{
assert(!treeWithCall->gtOverflow());
treeWithCall = treeWithCall->gtGetOp1();
}
assert(treeWithCall == call);
}
#endif
// Store the call type for later to introduce the correct placeholder.
var_types origCallType = call->TypeGet();
GenTree* result;
if (!canFastTailCall && !tailCallViaJitHelper)
{
// For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular
// calls with (to the JIT) regular control flow so we do not need to do
// much special handling.
result = fgMorphTailCallViaHelpers(call, tailCallHelpers);
}
else
{
// Otherwise we will transform into something that does not return. For
// fast tailcalls a "jump" and for tailcall via JIT helper a call to a
// JIT helper that does not return. So peel off everything after the
// call.
Statement* nextMorphStmt = fgMorphStmt->GetNextStmt();
JITDUMP("Remove all stmts after the call.\n");
while (nextMorphStmt != nullptr)
{
Statement* stmtToRemove = nextMorphStmt;
nextMorphStmt = stmtToRemove->GetNextStmt();
fgRemoveStmt(compCurBB, stmtToRemove);
}
bool isRootReplaced = false;
GenTree* root = fgMorphStmt->GetRootNode();
if (root != call)
{
JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call));
isRootReplaced = true;
fgMorphStmt->SetRootNode(call);
}
// Avoid potential extra work for the return (for example, vzeroupper)
call->gtType = TYP_VOID;
// The runtime requires that we perform a null check on the `this` argument before
// tail calling to a virtual dispatch stub. This requirement is a consequence of limitations
// in the runtime's ability to map an AV to a NullReferenceException if
// the AV occurs in a dispatch stub that has an unmanaged caller.
if (call->IsVirtualStub())
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
// Do some target-specific transformations (before we process the args,
// etc.) for the JIT helper case.
if (tailCallViaJitHelper)
{
fgMorphTailCallViaJitHelper(call);
// Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the
// argument list, invalidating the argInfo.
call->fgArgInfo = nullptr;
}
// Tail call via JIT helper: The VM can't use return address hijacking
// if we're not going to return and the helper doesn't have enough info
// to safely poll, so we poll before the tail call, if the block isn't
// already safe. Since tail call via helper is a slow mechanism, it
// doesn't matter whether we emit a GC poll. This is done to be in parity
// with Jit64. Also this avoids GC info size increase if almost all
// methods are expected to be tail calls (e.g. F#).
//
// Note that we can avoid emitting GC-poll if we know that the current
// BB is dominated by a Gc-SafePoint block. But we don't have dominator
// info at this point. One option is to just add a placeholder node for
// GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block
// is dominated by a GC-SafePoint. For now it is not clear whether
// optimizing slow tail calls is worth the effort. As a low cost check,
// we check whether the first and current basic blocks are
// GC-SafePoints.
//
// Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead,
// fgSetBlockOrder() is going to mark the method as fully interruptible
// if the block containing this tail call is reachable without executing
// any call.
BasicBlock* curBlock = compCurBB;
if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
(fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock))
{
// We didn't insert a poll block, so we need to morph the call now
// (Normally it will get morphed when we get to the split poll block)
GenTree* temp = fgMorphCall(call);
noway_assert(temp == call);
}
// Fast tail call: in case of fast tail calls, we need a jmp epilog and
// hence mark it as BBJ_RETURN with BBF_JMP flag set.
noway_assert(compCurBB->bbJumpKind == BBJ_RETURN);
if (canFastTailCall)
{
compCurBB->bbFlags |= BBF_HAS_JMP;
}
else
{
// We call CORINFO_HELP_TAILCALL which does not return, so we will
// not need epilogue.
compCurBB->bbJumpKind = BBJ_THROW;
}
if (isRootReplaced)
{
// We have replaced the root node of this stmt and deleted the rest,
// but we still have the deleted, dead nodes on the `fgMorph*` stack
// if the root node was an `ASG`, `RET` or `CAST`.
// Return a zero con node to exit morphing of the old trees without asserts
// and forbid POST_ORDER morphing doing something wrong with our call.
var_types callType;
if (varTypeIsStruct(origCallType))
{
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference));
if (howToReturnStruct == SPK_ByValue)
{
callType = TYP_I_IMPL;
}
else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType))
{
callType = TYP_FLOAT;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
}
else
{
callType = origCallType;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
callType = genActualType(callType);
GenTree* zero = gtNewZeroConNode(callType);
result = fgMorphTree(zero);
}
else
{
result = call;
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code
// generation.
//
// Arguments:
// call - The call to transform
// helpers - The tailcall helpers provided by the runtime.
//
// Return Value:
// Returns the transformed node.
//
// Notes:
// This transforms
// GT_CALL
// {callTarget}
// {this}
// {args}
// into
// GT_COMMA
// GT_CALL StoreArgsStub
// {callTarget} (depending on flags provided by the runtime)
// {this} (as a regular arg)
// {args}
// GT_COMMA
// GT_CALL Dispatcher
// GT_ADDR ReturnAddress
// {CallTargetStub}
// GT_ADDR ReturnValue
// GT_LCL ReturnValue
// whenever the call node returns a value. If the call node does not return a
// value the last comma will not be there.
//
GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help)
{
// R2R requires different handling but we don't support tailcall via
// helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper
assert(!opts.IsReadyToRun());
JITDUMP("fgMorphTailCallViaHelpers (before):\n");
DISPTREE(call);
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
// We come this route only for tail prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// We might or might not have called fgInitArgInfo before this point: in
// builds with FEATURE_FASTTAILCALL we will have called it when checking if
// we could do a fast tailcall, so it is possible we have added extra IR
// for non-standard args that we must get rid of. Get rid of that IR here
// and do this first as it will 'expose' the retbuf as the first arg, which
// we rely upon in fgCreateCallDispatcherAndGetResult.
call->ResetArgInfo();
GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher);
// Change the call to a call to the StoreArgs stub.
if (call->HasRetBufArg())
{
JITDUMP("Removing retbuf");
call->gtCallArgs = call->gtCallArgs->GetNext();
call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG;
}
const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0;
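// When CORINFO_TAILCALL_STORE_TARGET is set, the StoreArgs stub cannot embed the call target
// itself and expects it to be passed in explicitly; it is appended as the last argument below.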
GenTree* doBeforeStoreArgsStub = nullptr;
GenTree* thisPtrStubArg = nullptr;
// Put 'this' in normal param list
if (call->gtCallThisArg != nullptr)
{
JITDUMP("Moving this pointer into arg list\n");
GenTree* objp = call->gtCallThisArg->GetNode();
GenTree* thisPtr = nullptr;
call->gtCallThisArg = nullptr;
// JIT will need one or two copies of "this" in the following cases:
// 1) the call needs null check;
// 2) StoreArgs stub needs the target function pointer address and if the call is virtual
// the stub also needs "this" in order to evalute the target.
const bool callNeedsNullCheck = call->NeedsNullCheck();
const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual();
// TODO-Review: The following transformation is implemented under the assumption that
// both conditions can be true. However, I could not construct an example
// where a virtual tail call would require a null check. If the conditions turn out
// to be mutually exclusive, the following could be simplified.
if (callNeedsNullCheck || stubNeedsThisPtr)
{
// Clone "this" if "this" has no side effects.
if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0)
{
thisPtr = gtClone(objp, true);
}
// Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
if (thisPtr == nullptr)
{
const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
// tmp = "this"
doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp);
if (callNeedsNullCheck)
{
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet());
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck);
}
thisPtr = gtNewLclvNode(lclNum, objp->TypeGet());
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet());
}
}
else
{
if (callNeedsNullCheck)
{
// deref("this")
doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB);
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtClone(objp, true);
}
}
else
{
assert(stubNeedsThisPtr);
thisPtrStubArg = objp;
}
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr);
}
else
{
thisPtr = objp;
}
// During rationalization tmp="this" and null check will be materialized
// in the right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// We may need to pass the target, for instance for calli or generic methods
// where we pass instantiating stub.
if (stubNeedsTargetFnPtr)
{
JITDUMP("Adding target since VM requested it\n");
GenTree* target;
if (!call->IsVirtual())
{
if (call->gtCallType == CT_INDIRECT)
{
noway_assert(call->gtCallAddr != nullptr);
target = call->gtCallAddr;
}
else
{
CORINFO_CONST_LOOKUP addrInfo;
info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo);
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE);
if (addrInfo.accessType == IAT_VALUE)
{
handle = addrInfo.handle;
}
else if (addrInfo.accessType == IAT_PVALUE)
{
pIndirection = addrInfo.addr;
}
target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd);
}
}
else
{
assert(!call->tailCallInfo->GetSig()->hasTypeArg());
CORINFO_CALL_INFO callInfo;
unsigned flags = CORINFO_CALLINFO_LDFTN;
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_CALLINFO_CALLVIRT;
}
eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo);
target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo);
}
// Insert target as last arg
GenTreeCall::Use** newArgSlot = &call->gtCallArgs;
while (*newArgSlot != nullptr)
{
newArgSlot = &(*newArgSlot)->NextRef();
}
*newArgSlot = gtNewCallArgs(target);
}
// This is now a direct call to the store args stub and not a tailcall.
call->gtCallType = CT_USER_FUNC;
call->gtCallMethHnd = help.hStoreArgs;
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV);
// The store-args stub returns no value.
call->gtRetClsHnd = nullptr;
call->gtType = TYP_VOID;
call->gtReturnType = TYP_VOID;
GenTree* callStoreArgsStub = call;
if (doBeforeStoreArgsStub != nullptr)
{
callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub);
}
GenTree* finalTree =
gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult);
finalTree = fgMorphTree(finalTree);
JITDUMP("fgMorphTailCallViaHelpers (after):\n");
DISPTREE(finalTree);
return finalTree;
}
//------------------------------------------------------------------------
// fgCreateCallDispatcherAndGetResult: Given a call
// CALL
// {callTarget}
// {retbuf}
// {this}
// {args}
// create a similarly typed node that calls the tailcall dispatcher and returns
// the result, as in the following:
// COMMA
// CALL TailCallDispatcher
// ADDR ReturnAddress
// &CallTargetFunc
// ADDR RetValue
// RetValue
// If the call has type TYP_VOID, only create the CALL node.
//
// Arguments:
// origCall - the call
// callTargetStubHnd - the handle of the CallTarget function (this is a special
// IL stub created by the runtime)
// dispatcherHnd - the handle of the tailcall dispatcher function
//
// Return Value:
// A node that can be used in place of the original call.
//
GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd)
{
GenTreeCall* callDispatcherNode =
gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo());
// The dispatcher has signature
// void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue)
// Add return value arg.
GenTree* retValArg;
GenTree* retVal = nullptr;
unsigned int newRetLcl = BAD_VAR_NUM;
GenTree* copyToRetBufNode = nullptr;
if (origCall->HasRetBufArg())
{
JITDUMP("Transferring retbuf\n");
GenTree* retBufArg = origCall->gtCallArgs->GetNode();
assert(info.compRetBuffArg != BAD_VAR_NUM);
assert(retBufArg->OperIsLocal());
assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg);
// Caller return buffer argument retBufArg can point to GC heap while the dispatcher expects
// the return value argument retValArg to point to the stack.
// We use a temporary stack allocated return buffer to hold the value during the dispatcher call
// and copy the value back to the caller return buffer after that.
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer"));
constexpr bool unsafeValueClsCheck = false;
lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck);
lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet();
retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType));
var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet();
GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType);
GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr);
GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType);
constexpr bool isVolatile = false;
constexpr bool isCopyBlock = true;
copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock);
if (origCall->gtType != TYP_VOID)
{
retVal = gtClone(retBufArg);
}
}
else if (origCall->gtType != TYP_VOID)
{
JITDUMP("Creating a new temp for the return value\n");
newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher"));
if (varTypeIsStruct(origCall->gtType))
{
lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false);
}
else
{
// Since we pass a reference to the return value to the dispatcher
// we need to use the real return type so we can normalize it on
// load when we return it.
lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType;
}
lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
retValArg =
gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)));
retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType));
if (varTypeIsStruct(origCall->gtType))
{
retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv());
}
}
else
{
JITDUMP("No return value so using null pointer as arg\n");
retValArg = gtNewZeroConNode(TYP_I_IMPL);
}
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs);
// Add callTarget
callDispatcherNode->gtCallArgs =
gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd),
callDispatcherNode->gtCallArgs);
// Add the caller's return address slot.
if (lvaRetAddrVar == BAD_VAR_NUM)
{
lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address"));
lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL;
lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
}
GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL));
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs);
GenTree* finalTree = callDispatcherNode;
if (copyToRetBufNode != nullptr)
{
finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode);
}
if (origCall->gtType == TYP_VOID)
{
return finalTree;
}
assert(retVal != nullptr);
finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal);
// The JIT seems to want to CSE this comma and messes up multi-reg ret
// values in the process. Just avoid CSE'ing this tree entirely in that
// case.
if (origCall->HasMultiRegRetVal())
{
finalTree->gtFlags |= GTF_DONT_CSE;
}
return finalTree;
}
//------------------------------------------------------------------------
// getLookupTree: get a lookup tree
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// handleFlags - flags to set on the result node
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the lookup tree
//
GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// The access is a direct or memory-indirect (of a fixed address) reference.
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
handle = pLookup->constLookup.handle;
}
else if (pLookup->constLookup.accessType == IAT_PVALUE)
{
pIndirection = pLookup->constLookup.addr;
}
return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}
return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle);
}
//------------------------------------------------------------------------
// getRuntimeLookupTree: get a tree for a runtime lookup
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the runtime lookup tree
//
GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle)
{
assert(!compIsForInlining());
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be
// used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array.
if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull ||
pRuntimeLookup->testForFixup)
{
// If the first condition is true, runtime lookup tree is available only via the run-time helper function.
// TODO-CQ If the second or third condition is true, we are always using the slow path since we can't
// introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper.
// The long-term solution is to introduce a new node representing a runtime lookup, create instances
// of that node both in the importer and here, and expand the node in lower (introducing control flow if
// necessary).
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup,
getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind),
compileTimeHandle);
}
GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack));
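// cloneTree either clones a side-effect-free tree or spills it to a new temp so its value can
// be used more than once without re-evaluation; any spill assignments are pushed onto 'stmts'
// and sequenced in front of the final result via GT_COMMA nodes at the end of this function.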
auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* {
if (!((*tree)->gtFlags & GTF_GLOB_EFFECT))
{
GenTree* clone = gtClone(*tree, true);
if (clone)
{
return clone;
}
}
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
stmts.Push(gtNewTempAssign(temp, *tree));
*tree = gtNewLclvNode(temp, lvaGetActualType(temp));
return gtNewLclvNode(temp, lvaGetActualType(temp));
};
// Apply repeated indirections
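// For illustration (ignoring the indirectFirstOffset/indirectSecondOffset cases): with two
// indirections and offsets {o0, o1} this loop plus the final dereference below builds a tree
// equivalent to *(*(ctx + o0) + o1), where 'ctx' is the runtime context tree computed above.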
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
GenTree* preInd = nullptr;
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset"));
}
if (i != 0)
{
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result);
}
if (pRuntimeLookup->offsets[i] != 0)
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
}
}
assert(!pRuntimeLookup->testForNull);
if (pRuntimeLookup->indirections > 0)
{
assert(!pRuntimeLookup->testForFixup);
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
}
// Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result)))
while (!stmts.Empty())
{
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result);
}
DISPTREE(result);
return result;
}
//------------------------------------------------------------------------
// getVirtMethodPointerTree: get a tree for a virtual method pointer
//
// Arguments:
// thisPtr - tree representing `this` pointer
// pResolvedToken - pointer to the resolved token of the method
// pCallInfo - pointer to call info
//
// Return Value:
// A node representing the virtual method pointer
GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo)
{
GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true);
GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false);
GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc);
return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}
//------------------------------------------------------------------------
// getTokenHandleTree: get a handle tree for a token
//
// Arguments:
// pResolvedToken - token to get a handle for
// parent - whether parent should be imported
//
// Return Value:
// A node representing the handle for the given token
GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent)
{
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo);
GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
embedInfo.compileTimeHandle);
// If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
}
return result;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for tail call via JIT helper.
*/
void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call)
{
JITDUMP("fgMorphTailCallViaJitHelper (before):\n");
DISPTREE(call);
// For the helper-assisted tail calls, we need to push all the arguments
// into a single list, and then add a few extra at the beginning or end.
//
// For x86, the tailcall helper is defined as:
//
// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
// callTarget)
//
// Note that the special arguments are on the stack, whereas the function arguments follow
// the normal convention: there might be register arguments in ECX and EDX. The stack will
// look like (highest address at the top):
// first normal stack argument
// ...
// last normal stack argument
// numberOfOldStackArgs
// numberOfNewStackArgs
// flags
// callTarget
//
// Each special arg is 4 bytes.
//
// 'flags' is a bitmask where:
// 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all
// callee-saved registers for tailcall functions. Note that the helper assumes
// that the callee-saved registers live immediately below EBP, and must have been
// pushed in this order: EDI, ESI, EBX.
// 2 == call target is a virtual stub dispatch.
//
// The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details
// on the custom calling convention.
// Check for PInvoke call types that we don't handle in codegen yet.
assert(!call->IsUnmanaged());
assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr));
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
// We come this route only for tail prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// First move the 'this' pointer (if any) onto the regular arg list. We do this because
// we are going to prepend special arguments onto the argument list (for non-x86 platforms),
// and thus shift where the 'this' pointer will be passed to a later argument slot. In
// addition, for all platforms, we are going to change the call into a helper call. Our code
// generation for handling calls to helpers does not handle 'this' pointers. So, when we
// do this transformation, we must explicitly create a null 'this' pointer check, if required,
// since special 'this' pointer handling will no longer kick in.
//
// Some call types, such as virtual vtable calls, require creating a call address expression
// that involves the "this" pointer. Lowering will sometimes create an embedded statement
// to create a temporary that is assigned to the "this" pointer expression, and then use
// that temp to create the call address expression. This temp creation embedded statement
// will occur immediately before the "this" pointer argument, and then will be used for both
// the "this" pointer argument as well as the call address expression. In the normal ordering,
// the embedded statement establishing the "this" pointer temp will execute before both uses
// of the temp. However, for tail calls via a helper, we move the "this" pointer onto the
// normal call argument list, and insert a placeholder which will hold the call address
// expression. For non-x86, things are ok, because the order of execution of these is not
// altered. However, for x86, the call address expression is inserted as the *last* argument
// in the argument list, *after* the "this" pointer. It will be put on the stack, and be
// evaluated first. To ensure we don't end up with out-of-order temp definition and use,
// for those cases where call lowering creates an embedded form temp of "this", we will
// create a temp here, early, that will later get morphed correctly.
if (call->gtCallThisArg != nullptr)
{
GenTree* thisPtr = nullptr;
GenTree* objp = call->gtCallThisArg->GetNode();
call->gtCallThisArg = nullptr;
if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR))
{
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", tmp)
var_types vt = objp->TypeGet();
GenTree* tmp = gtNewLclvNode(lclNum, vt);
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp);
objp = thisPtr;
}
if (call->NeedsNullCheck())
{
// clone "this" if "this" has no side effects.
if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT))
{
thisPtr = gtClone(objp, true);
}
var_types vt = objp->TypeGet();
if (thisPtr == nullptr)
{
// create a temp if either "this" has side effects or "this" is too complex to clone.
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, vt);
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck);
// COMMA(COMMA(tmp = "this", deref(tmp)), tmp)
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt));
}
else
{
// thisPtr = COMMA(deref("this"), "this")
GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB);
thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true));
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
}
else
{
thisPtr = objp;
}
// TODO-Cleanup: we leave it as a virtual stub call to
// use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here
// and change `LowerCall` to recognize it as a direct call.
// During rationalization tmp="this" and null check will
// materialize as embedded stmts in the right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will
// append to the list.
GenTreeCall::Use** ppArg = &call->gtCallArgs;
for (GenTreeCall::Use& use : call->Args())
{
ppArg = &use.NextRef();
}
assert(ppArg != nullptr);
assert(*ppArg == nullptr);
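// compArgSize includes the portion of the incoming arguments that is passed in registers,
// so subtract those slots to get the number of incoming stack argument words
// ('numberOfOldStackArgs' below).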
unsigned nOldStkArgsWords =
(compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the flags.
// The constant will be replaced.
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg1);
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the real call target that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg0);
// It is now a varargs tail call.
call->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
call->gtFlags &= ~GTF_CALL_POP_ARGS;
// The function is responsible for doing explicit null check when it is necessary.
assert(!call->NeedsNullCheck());
JITDUMP("fgMorphTailCallViaJitHelper (after):\n");
DISPTREE(call);
}
//------------------------------------------------------------------------
// fgGetStubAddrArg: Return the virtual stub address for the given call.
//
// Notes:
// the JIT must place the address of the stub used to load the call target,
// the "stub indirection cell", in a special call argument passed in a special register.
//
// Arguments:
// call - a call that needs virtual stub dispatching.
//
// Return Value:
// addr tree with its register requirement set.
//
GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call)
{
assert(call->IsVirtualStub());
GenTree* stubAddrArg;
if (call->gtCallType == CT_INDIRECT)
{
stubAddrArg = gtClone(call->gtCallAddr, true);
}
else
{
assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
ssize_t addr = ssize_t(call->gtStubCallStubAddr);
stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
}
assert(stubAddrArg != nullptr);
stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg());
return stubAddrArg;
}
//------------------------------------------------------------------------------
// fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that
// corresponds to the argument to a recursive call.
//
// Notes:
// Due to non-standard args this is not just fgArgTabEntry::argNum.
// For example, in R2R compilations we will have added a non-standard
// arg for the R2R indirection cell.
//
// Arguments:
// call - the call whose arg table is being examined
// argTabEntry - the arg
//
unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry)
{
fgArgInfo* argInfo = call->fgArgInfo;
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
unsigned numToRemove = 0;
for (unsigned i = 0; i < argCount; i++)
{
fgArgTabEntry* arg = argTable[i];
// Late added args add extra args that do not map to IL parameters and that we should not reassign.
if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate())
continue;
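// Each late-added non-standard arg inserted before this one shifts its position by one
// relative to the IL parameter numbering, so count it.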
if (arg->argNum < argTabEntry->argNum)
numToRemove++;
}
return argTabEntry->argNum - numToRemove;
}
//------------------------------------------------------------------------------
// fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop.
//
//
// Arguments:
// block - basic block ending with a recursive fast tail call
// recursiveTailCall - recursive tail call to transform
//
// Notes:
// The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop.
void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall)
{
assert(recursiveTailCall->IsTailCallConvertibleToLoop());
Statement* lastStmt = block->lastStmt();
assert(recursiveTailCall == lastStmt->GetRootNode());
// Transform recursive tail call into a loop.
Statement* earlyArgInsertionPoint = lastStmt;
const DebugInfo& callDI = lastStmt->GetDebugInfo();
// Hoist arg setup statement for the 'this' argument.
GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg;
if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode())
{
Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt);
}
// All arguments whose trees may involve caller parameter local variables need to be assigned to temps first;
// then the temps need to be assigned to the method parameters. This is done so that the caller
// parameters are not re-assigned before call arguments depending on them are evaluated.
// tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of
// where the next temp or parameter assignment should be inserted.
// In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first
// while the second call argument (const 1) doesn't.
// Basic block before tail recursion elimination:
// ***** BB04, stmt 1 (top level)
// [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013)
// [000033] --C - G------ - \--* call void RecursiveMethod
// [000030] ------------ | / --* const int - 1
// [000031] ------------arg0 in rcx + --* +int
// [000029] ------------ | \--* lclVar int V00 arg1
// [000032] ------------arg1 in rdx \--* const int 1
//
//
// Basic block after tail recursion elimination :
// ***** BB04, stmt 1 (top level)
// [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000030] ------------ | / --* const int - 1
// [000031] ------------ | / --* +int
// [000029] ------------ | | \--* lclVar int V00 arg1
// [000050] - A---------- \--* = int
// [000049] D------N---- \--* lclVar int V02 tmp0
//
// ***** BB04, stmt 2 (top level)
// [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000052] ------------ | / --* lclVar int V02 tmp0
// [000054] - A---------- \--* = int
// [000053] D------N---- \--* lclVar int V00 arg0
// ***** BB04, stmt 3 (top level)
// [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000032] ------------ | / --* const int 1
// [000057] - A---------- \--* = int
// [000056] D------N---- \--* lclVar int V01 arg1
Statement* tmpAssignmentInsertionPoint = lastStmt;
Statement* paramAssignmentInsertionPoint = lastStmt;
// Process early args. They may contain both setup statements for late args and actual args.
// Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum
// below has the correct second argument.
int earlyArgIndex = (thisArg == nullptr) ? 0 : 1;
for (GenTreeCall::Use& use : recursiveTailCall->Args())
{
GenTree* earlyArg = use.GetNode();
if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode())
{
if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0)
{
// This is a setup node so we need to hoist it.
Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt);
}
else
{
// This is an actual argument that needs to be assigned to the corresponding caller parameter.
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
}
}
earlyArgIndex++;
}
// Process late args.
int lateArgIndex = 0;
for (GenTreeCall::Use& use : recursiveTailCall->LateArgs())
{
// A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter.
GenTree* lateArg = use.GetNode();
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
lateArgIndex++;
}
// If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that
// compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that
// block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here.
if (!info.compIsStatic && (lvaArg0Var != info.compThisArg))
{
var_types thisType = lvaTable[info.compThisArg].TypeGet();
GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType);
GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType));
Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt);
}
// If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog
// but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization
// for all non-parameter IL locals as well as temp structs with GC fields.
// Liveness phase will remove unnecessary initializations.
if (info.compInitMem || compSuppressedZeroInit)
{
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++)
{
#if FEATURE_FIXED_OUT_ARGS
if (varNum == lvaOutgoingArgSpaceVar)
{
continue;
}
#endif // FEATURE_FIXED_OUT_ARGS
if (!varDsc->lvIsParam)
{
var_types lclType = varDsc->TypeGet();
bool isUserLocal = (varNum < info.compLocalsCount);
bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr());
bool hadSuppressedInit = varDsc->lvSuppressedZeroInit;
if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit)
{
GenTree* lcl = gtNewLclvNode(varNum, lclType);
GenTree* init = nullptr;
if (varTypeIsStruct(lclType))
{
const bool isVolatile = false;
const bool isCopyBlock = false;
init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock);
init = fgMorphInitBlock(init);
}
else
{
GenTree* zero = gtNewZeroConNode(genActualType(lclType));
init = gtNewAssignNode(lcl, zero);
}
Statement* initStmt = gtNewStmt(init, callDI);
fgInsertStmtBefore(block, lastStmt, initStmt);
}
}
}
}
// Remove the call
fgRemoveStmt(block, lastStmt);
// Set the loop edge.
if (opts.IsOSR())
{
// Todo: this may not look like a viable loop header.
// Might need the moral equivalent of a scratch BB.
block->bbJumpDest = fgEntryBB;
}
else
{
// Ensure we have a scratch block and then target the next
// block. Loop detection needs to see a pred out of the loop,
// so mark the scratch block BBF_DONT_REMOVE to prevent empty
// block removal on it.
fgEnsureFirstBBisScratch();
fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
block->bbJumpDest = fgFirstBB->bbNext;
}
// Finish hooking things up.
block->bbJumpKind = BBJ_ALWAYS;
fgAddRefPred(block->bbJumpDest, block);
block->bbFlags &= ~BBF_HAS_JMP;
}
//------------------------------------------------------------------------------
// fgAssignRecursiveCallArgToCallerParam : Assign an argument of a recursive call to the corresponding caller parameter.
//
//
// Arguments:
// arg - argument to assign
// argTabEntry - argument table entry corresponding to arg
// lclParamNum - the lcl num of the parameter
// block - basic block the call is in
// callDI - debug info of the call
// tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary)
// paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted
//
// Return Value:
// parameter assignment statement if one was inserted; nullptr otherwise.
Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint)
{
// Call arguments should be assigned to temps first and then the temps should be assigned to parameters because
// some argument trees may reference parameters directly.
GenTree* argInTemp = nullptr;
bool needToAssignParameter = true;
// TODO-CQ: enable calls with struct arguments passed in registers.
noway_assert(!varTypeIsStruct(arg->TypeGet()));
if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl())
{
// The argument is already assigned to a temp or is a const.
argInTemp = arg;
}
else if (arg->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = arg->AsLclVar()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (!varDsc->lvIsParam)
{
// The argument is a non-parameter local so it doesn't need to be assigned to a temp.
argInTemp = arg;
}
else if (lclNum == lclParamNum)
{
// The argument is the same parameter local that we were about to assign so
// we can skip the assignment.
needToAssignParameter = false;
}
}
// TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve
// any caller parameters. Some common cases are handled above but we may be able to eliminate
// more temp assignments.
Statement* paramAssignStmt = nullptr;
if (needToAssignParameter)
{
if (argInTemp == nullptr)
{
// The argument is not assigned to a temp. We need to create a new temp and insert an assignment.
// TODO: we can avoid a temp assignment if we can prove that the argument tree
// doesn't involve any caller parameters.
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp"));
lvaTable[tmpNum].lvType = arg->gtType;
GenTree* tempSrc = arg;
GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType);
GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc);
Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI);
fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt);
argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType);
}
// Now assign the temp to the parameter.
const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum);
assert(paramDsc->lvIsParam);
GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType);
GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp);
paramAssignStmt = gtNewStmt(paramAssignNode, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt);
}
return paramAssignStmt;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for code generation.
*/
GenTree* Compiler::fgMorphCall(GenTreeCall* call)
{
if (call->CanTailCall())
{
GenTree* newNode = fgMorphPotentialTailCall(call);
if (newNode != nullptr)
{
return newNode;
}
assert(!call->CanTailCall());
#if FEATURE_MULTIREG_RET
if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet()))
{
// The tail call has been rejected so we must finish the work deferred
// by impFixupCallStructReturn for multi-reg-returning calls and transform
// ret call
// into
// temp = call
// ret temp
// Force re-evaluating the argInfo as the return argument has changed.
call->ResetArgInfo();
// Create a new temp.
unsigned tmpNum =
lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call)."));
lvaTable[tmpNum].lvIsMultiRegRet = true;
CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd;
assert(structHandle != NO_CLASS_HANDLE);
const bool unsafeValueClsCheck = false;
lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck);
var_types structType = lvaTable[tmpNum].lvType;
GenTree* dst = gtNewLclvNode(tmpNum, structType);
GenTree* assg = gtNewAssignNode(dst, call);
assg = fgMorphTree(assg);
// Create the assignment statement and insert it before the current statement.
Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo());
fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt);
// Return the temp.
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
result->gtFlags |= GTF_DONT_CSE;
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
#ifdef DEBUG
if (verbose)
{
printf("\nInserting assignment of a multi-reg call result to a temp:\n");
gtDispStmt(assgStmt);
}
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
return result;
}
#endif
}
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR)
#ifdef FEATURE_READYTORUN
|| call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR)
#endif
) &&
(call == fgMorphStmt->GetRootNode()))
{
// This is a call to CORINFO_HELP_VIRTUAL_FUNC_PTR whose result is ignored.
// Transform it into a null check.
GenTree* thisPtr = call->gtCallArgs->GetNode();
GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB);
return fgMorphTree(nullCheck);
}
noway_assert(call->gtOper == GT_CALL);
//
// Only count calls once (only in the global morph phase)
//
if (fgGlobalMorph)
{
if (call->gtCallType == CT_INDIRECT)
{
optCallCount++;
optIndirectCallCount++;
}
else if (call->gtCallType == CT_USER_FUNC)
{
optCallCount++;
if (call->IsVirtual())
{
optIndirectCallCount++;
}
}
}
// Couldn't inline - remember that this BB contains method calls
// Mark the block as a GC safe point for the call if possible.
// In the event the call indicates the block isn't a GC safe point
// and the call is unmanaged with a GC transition suppression request
// then insert a GC poll.
CLANG_FORMAT_COMMENT_ANCHOR;
if (IsGcSafePoint(call))
{
compCurBB->bbFlags |= BBF_GC_SAFE_POINT;
}
// Regardless of the state of the basic block with respect to GC safe point,
// we will always insert a GC Poll for scenarios involving a suppressed GC
// transition. Only mark the block for GC Poll insertion on the first morph.
if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition())
{
compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT);
optMethodFlags |= OMF_NEEDS_GCPOLLS;
}
// Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag
//
// We need to do these before the arguments are morphed
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC))
{
// See if this is foldable
GenTree* optTree = gtFoldExprCall(call);
// If we optimized, morph the result
if (optTree != call)
{
return fgMorphTree(optTree);
}
}
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
// Process the "normal" argument list
call = fgMorphArgs(call);
noway_assert(call->gtOper == GT_CALL);
// Assign DEF flags if it produces a definition from "return buffer".
fgAssignSetVarDef(call);
if (call->OperRequiresAsgFlag())
{
call->gtFlags |= GTF_ASG;
}
// Should we expand this virtual method call target early here?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
// We only expand the Vtable Call target once in the global morph phase
if (fgGlobalMorph)
{
assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once
call->gtControlExpr = fgExpandVirtualVtableCallTarget(call);
}
// We always have to morph or re-morph the control expr
//
call->gtControlExpr = fgMorphTree(call->gtControlExpr);
// Propagate any gtFlags into the call
call->gtFlags |= call->gtControlExpr->gtFlags;
}
// Morph stelem.ref helper call to store a null value, into a store into an array without the helper.
// This needs to be done after the arguments are morphed to ensure constant propagation has already taken place.
if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST)))
{
GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode();
if (value->IsIntegralConst(0))
{
assert(value->OperGet() == GT_CNS_INT);
GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode();
GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode();
// Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy
// the spill trees as well if necessary.
GenTreeOp* argSetup = nullptr;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* const arg = use.GetNode();
if (arg->OperGet() != GT_ASG)
{
continue;
}
assert(arg != arr);
assert(arg != index);
arg->gtFlags &= ~GTF_LATE_ARG;
GenTree* op1 = argSetup;
if (op1 == nullptr)
{
op1 = gtNewNothingNode();
#if DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg);
#if DEBUG
argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
#ifdef DEBUG
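// The array, index and value trees were already morphed as call arguments; clear the
// debug-only 'morphed' flag so that morphing them again as part of the new store
// does not trip the double-morph checks.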
auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult {
(*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
return WALK_CONTINUE;
};
fgWalkTreePost(&arr, resetMorphedFlag);
fgWalkTreePost(&index, resetMorphedFlag);
fgWalkTreePost(&value, resetMorphedFlag);
#endif // DEBUG
GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index);
GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value);
GenTree* result = fgMorphTree(arrStore);
if (argSetup != nullptr)
{
result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result);
#if DEBUG
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return result;
}
}
if (call->IsNoReturn())
{
//
// If we know that the call does not return then we can set fgRemoveRestOfBlock
// to remove all subsequent statements and change the call's basic block to BBJ_THROW.
// As a result the compiler won't need to preserve live registers across the call.
//
// This isn't needed for tail calls as there shouldn't be any code after the call anyway.
// Besides, the tail call code is part of the epilog and converting the block to
// BBJ_THROW would result in the tail call being dropped as the epilog is generated
// only for BBJ_RETURN blocks.
//
if (!call->IsTailCall())
{
fgRemoveRestOfBlock = true;
}
}
return call;
}
/*****************************************************************************
*
* Expand and return the call target address for a VirtualCall
* The code here should match that generated by LowerVirtualVtableCall
*/
GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call)
{
GenTree* result;
JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
noway_assert(call->gtCallType == CT_USER_FUNC);
// get a reference to the thisPtr being passed
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0);
GenTree* thisPtr = thisArgTabEntry->GetNode();
// fgMorphArgs must enforce this invariant by creating a temp
//
assert(thisPtr->OperIsLocal());
// Make a copy of the thisPtr by cloning
//
thisPtr = gtClone(thisPtr, true);
noway_assert(thisPtr != nullptr);
// Get hold of the vtable offset
unsigned vtabOffsOfIndirection;
unsigned vtabOffsAfterIndirection;
bool isRelative;
info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection,
&isRelative);
// Dereference the this pointer to obtain the method table; it is called vtab below.
GenTree* vtab;
assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable
vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr);
vtab->gtFlags |= GTF_IND_INVARIANT;
// Get the appropriate vtable chunk
if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
{
// Note this isRelative code path is currently never executed
// as the VM doesn't ever return: isRelative == true
//
if (isRelative)
{
// MethodTable offset is a relative pointer.
//
// An additional temporary variable is used to store the virtual table pointer.
// The address of the method is obtained by the following computations:
//
// Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
// vtable-1st-level-indirection):
// tmp = vtab
//
// Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
// result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
//
//
// When isRelative is true we need to setup two temporary variables
// var1 = vtab
// var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
// result = [var2] + var2
//
unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab"));
unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative"));
GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab
// [tmp + vtabOffsOfIndirection]
GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false);
tmpTree1->gtFlags |= GTF_IND_NONFAULTING;
tmpTree1->gtFlags |= GTF_IND_INVARIANT;
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection
GenTree* tmpTree2 =
gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL));
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1);
GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression>
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2]
result->gtFlags |= GTF_IND_NONFAULTING;
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2
// Now stitch together the two assignment and the calculation of result into a single tree
GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result);
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree);
}
else
{
// result = [vtab + vtabOffsOfIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
}
else
{
result = vtab;
assert(!isRelative);
}
if (!isRelative)
{
// Load the function address
// result = [result + vtabOffsAfterIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL));
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
}
return result;
}
/*****************************************************************************
*
* Transform the given constant tree for code generation.
*/
GenTree* Compiler::fgMorphConst(GenTree* tree)
{
assert(tree->OperIsConst());
/* Clear any exception flags or other unnecessary flags
* that may have been set before folding this node to a constant */
tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS);
if (!tree->OperIs(GT_CNS_STR))
{
return tree;
}
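// An access to the String.Empty field is morphed directly into the empty string literal object.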
if (tree->AsStrCon()->IsStringEmptyField())
{
LPVOID pValue;
InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
return fgMorphTree(gtNewStringLiteralNode(iat, pValue));
}
// TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will
// guarantee slow performance for that block. Instead, cache the return value
// of CORINFO_HELP_STRCNS and consult the cache first, which gives reasonable perf.
bool useLazyStrCns = false;
if (compCurBB->bbJumpKind == BBJ_THROW)
{
useLazyStrCns = true;
}
else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall())
{
// Quick check: is the root node of the current statement a noreturn or throwing call?
GenTreeCall* call = compCurStmt->GetRootNode()->AsCall();
useLazyStrCns = call->IsNoReturn() || fgIsThrow(call);
}
if (useLazyStrCns)
{
CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd);
if (helper != CORINFO_HELP_UNDEF)
{
// For un-important blocks, we want to construct the string lazily
GenTreeCall::Use* args;
if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE)
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT));
}
else
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT),
gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd));
}
tree = gtNewHelperCallNode(helper, TYP_REF, args);
return fgMorphTree(tree);
}
}
assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd));
LPVOID pValue;
InfoAccessType iat =
info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue);
tree = gtNewStringLiteralNode(iat, pValue);
return fgMorphTree(tree);
}
//------------------------------------------------------------------------
// fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar.
//
// Arguments:
// obj - the obj node.
// destroyNodes -- destroy nodes that are optimized away
//
// Return value:
// GenTreeLclVar if the obj can be replaced by it, null otherwise.
//
// Notes:
// TODO-CQ: currently this transformation is done only under copy block,
// but it is beneficial to do it for each OBJ node. However, `PUT_ARG_STACK`
// for some platforms does not expect struct `LCL_VAR` as a source, so
// it needs more work.
//
GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes)
{
if (opts.OptimizationEnabled())
{
GenTree* op1 = obj->Addr();
assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity");
if (op1->OperIs(GT_ADDR))
{
GenTreeUnOp* addr = op1->AsUnOp();
GenTree* addrOp = addr->gtGetOp1();
if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR))
{
GenTreeLclVar* lclVar = addrOp->AsLclVar();
ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout();
ClassLayout* objLayout = obj->GetLayout();
if (ClassLayout::AreCompatible(lclVarLayout, objLayout))
{
#ifdef DEBUG
CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle();
assert(objClsHandle != NO_CLASS_HANDLE);
if (verbose)
{
CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar);
printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar));
printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different"));
}
#endif
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
lclVar->gtFlags &= ~GTF_DONT_CSE;
lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE);
if (destroyNodes)
{
DEBUG_DESTROY_NODE(obj);
DEBUG_DESTROY_NODE(addr);
}
return lclVar;
}
}
}
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_LEAF tree for code generation.
*/
GenTree* Compiler::fgMorphLeaf(GenTree* tree)
{
assert(tree->OperKind() & GTK_LEAF);
if (tree->gtOper == GT_LCL_VAR)
{
const bool forceRemorph = false;
return fgMorphLocalVar(tree, forceRemorph);
}
else if (tree->gtOper == GT_LCL_FLD)
{
if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
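// On x86, varargs methods address their stack parameters relative to the varargs cookie;
// fgMorphStackArgForVarArgs rewrites the access if this local is such a parameter.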
GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(),
tree->AsLclFld()->GetLclOffs());
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
}
else if (tree->gtOper == GT_FTN_ADDR)
{
GenTreeFptrVal* fptrValTree = tree->AsFptrVal();
// A function pointer address is being used. Let the VM know if this is the
// target of a Delegate or a raw function pointer.
bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget;
CORINFO_CONST_LOOKUP addrInfo;
#ifdef FEATURE_READYTORUN
if (fptrValTree->gtEntryPoint.addr != nullptr)
{
addrInfo = fptrValTree->gtEntryPoint;
}
else
#endif
{
info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo);
}
GenTree* indNode = nullptr;
switch (addrInfo.accessType)
{
case IAT_PPVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true);
// Add the second indirection
indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode);
// This indirection won't cause an exception.
indNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
break;
case IAT_PVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true);
break;
case IAT_VALUE:
// Refer to gtNewIconHandleNode() as the template for constructing a constant handle
//
tree->SetOper(GT_CNS_INT);
tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle));
tree->gtFlags |= GTF_ICON_FTN_ADDR;
break;
default:
noway_assert(!"Unknown addrInfo.accessType");
}
if (indNode != nullptr)
{
DEBUG_DESTROY_NODE(tree);
tree = fgMorphTree(indNode);
}
}
return tree;
}
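//------------------------------------------------------------------------
// fgAssignSetVarDef: Set the definition flags on the local, if any, that is
// defined by an assignment.
//
// Arguments:
// tree - the assignment tree
//
// Notes:
// A whole-local definition gets GTF_VAR_DEF; a partial definition also gets
// GTF_VAR_USEASG since it is modeled as a use followed by a def.
//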
void Compiler::fgAssignSetVarDef(GenTree* tree)
{
GenTreeLclVarCommon* lclVarCmnTree;
bool isEntire = false;
if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire))
{
if (isEntire)
{
lclVarCmnTree->gtFlags |= GTF_VAR_DEF;
}
else
{
// We consider partial definitions to be modeled as uses followed by definitions.
// This captures the idea that precedings defs are not necessarily made redundant
// by this definition.
lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG);
}
}
}
//------------------------------------------------------------------------
// fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment
//
// Arguments:
// tree - The block assignment to be possibly morphed
//
// Return Value:
// The modified tree if successful, nullptr otherwise.
//
// Assumptions:
// 'tree' must be a block assignment.
//
// Notes:
// If successful, this method always returns the incoming tree, modifying only
// its arguments.
//
GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
{
// This must be a block assignment.
noway_assert(tree->OperIsBlkOp());
var_types asgType = tree->TypeGet();
GenTree* asg = tree;
GenTree* dest = asg->gtGetOp1();
GenTree* src = asg->gtGetOp2();
unsigned destVarNum = BAD_VAR_NUM;
LclVarDsc* destVarDsc = nullptr;
GenTree* destLclVarTree = nullptr;
bool isCopyBlock = asg->OperIsCopyBlkOp();
bool isInitBlock = !isCopyBlock;
unsigned size = 0;
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
if (dest->gtEffectiveVal()->OperIsBlk())
{
GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk();
size = lhsBlk->Size();
if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree))
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
}
if (lhsBlk->OperGet() == GT_OBJ)
{
clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle();
}
}
else
{
// Is this an enregisterable struct that is already a simple assignment?
// This can happen if we are re-morphing.
// Note that we won't do this straightaway if this is a SIMD type, since it
// may be a promoted lclVar (sometimes we promote the individual float fields of
// fixed-size SIMD).
if (dest->OperGet() == GT_IND)
{
noway_assert(asgType != TYP_STRUCT);
if (varTypeIsStruct(asgType))
{
destLclVarTree = fgIsIndirOfAddrOfLocal(dest);
}
if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR))
{
fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/);
dest->gtFlags |= GTF_DONT_CSE;
return tree;
}
}
else
{
noway_assert(dest->OperIsLocal());
destLclVarTree = dest;
}
if (destLclVarTree != nullptr)
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
if (asgType == TYP_STRUCT)
{
clsHnd = destVarDsc->GetStructHnd();
size = destVarDsc->lvExactSize;
}
}
if (asgType != TYP_STRUCT)
{
size = genTypeSize(asgType);
}
}
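// If we could not determine the size, bail; the block morphers (fgMorphInitBlock /
// fgMorphCopyBlock) will handle this assignment.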
if (size == 0)
{
return nullptr;
}
if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
if (src->IsCall() || src->OperIsSIMD())
{
// Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413.
return nullptr;
}
if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet()))
{
//
// See if we can do a simple transformation:
//
// GT_ASG <TYP_size>
// / \.
// GT_IND GT_IND or CNS_INT
// | |
// [dest] [src]
//
if (asgType == TYP_STRUCT)
{
// It is possible to use `initobj` to init a primitive type on the stack,
// like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`;
// in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)`
// and this code path transforms it into `ASG ref(LCL_VAR ref, 0)` because it is not a real
// struct assignment.
if (size == REGSIZE_BYTES)
{
if (clsHnd == NO_CLASS_HANDLE)
{
// A register-sized cpblk can be treated as an integer assignment.
asgType = TYP_I_IMPL;
}
else
{
BYTE gcPtr;
info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
asgType = getJitGCType(gcPtr);
}
}
else
{
switch (size)
{
case 1:
asgType = TYP_BYTE;
break;
case 2:
asgType = TYP_SHORT;
break;
#ifdef TARGET_64BIT
case 4:
asgType = TYP_INT;
break;
#endif // TARGET_64BIT
}
}
}
}
GenTree* srcLclVarTree = nullptr;
LclVarDsc* srcVarDsc = nullptr;
if (isCopyBlock)
{
if (src->OperGet() == GT_LCL_VAR)
{
srcLclVarTree = src;
srcVarDsc = lvaGetDesc(src->AsLclVarCommon());
}
else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree))
{
srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon());
}
if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
}
if (asgType != TYP_STRUCT)
{
noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType));
// For initBlk, a non-constant source is not going to allow us to fiddle
// with the bits to create a single assignment.
// Nor do we (for now) support transforming an InitBlock of SIMD type, unless
// it is a direct assignment to a lclVar and the value is zero.
if (isInitBlock)
{
if (!src->IsConstInitVal())
{
return nullptr;
}
if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr)))
{
return nullptr;
}
}
if (destVarDsc != nullptr)
{
// Kill everything about dest
if (optLocalAssertionProp)
{
if (optAssertionCount > 0)
{
fgKillDependentAssertions(destVarNum DEBUGARG(tree));
}
}
// A previous incarnation of this code also required the local not to be
// address-exposed(=taken). That seems orthogonal to the decision of whether
// to do field-wise assignments: being address-exposed will cause it to be
// "dependently" promoted, so it will be in the right memory location. One possible
// further reason for avoiding field-wise stores is that the struct might have alignment-induced
// holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
// concern, then we could compromise, and say that being address-exposed plus having fields that do not
// completely cover the memory of the struct prevents field-wise assignments. The same situation exists for
// the "src" decision.
if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
return nullptr;
}
else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc)))
{
// Use the dest local var directly, as well as its type.
dest = destLclVarTree;
asgType = destVarDsc->lvType;
// If the block operation had been a write to a local var of a small int type,
// of the exact size of the small int type, and the var is NormalizeOnStore,
// we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
// have done that normalization. If we're now making it into an assignment,
// the NormalizeOnStore will work, and it can be a full def.
if (destVarDsc->lvNormalizeOnStore())
{
dest->gtFlags &= (~GTF_VAR_USEASG);
}
}
else
{
// Could be a non-promoted struct, or a floating point type local, or
// an int subject to a partial write. Don't enregister.
lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
// Mark the local var tree as a definition point of the local.
destLclVarTree->gtFlags |= GTF_VAR_DEF;
if (size < destVarDsc->lvExactSize)
{ // If it's not a full-width assignment....
destLclVarTree->gtFlags |= GTF_VAR_USEASG;
}
if (dest == destLclVarTree)
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
dest = gtNewIndir(asgType, addr);
}
}
}
// Check to ensure we don't have a reducible *(& ... )
if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR)
{
// If dest is an Indir or Block, and it has a child that is a Addr node
//
GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR
// Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'?
//
GenTree* destOp = addrNode->gtGetOp1();
var_types destOpType = destOp->TypeGet();
// We can if we have a primitive integer type and the sizes are exactly the same.
//
if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType))))
{
dest = destOp;
asgType = destOpType;
}
}
if (dest->gtEffectiveVal()->OperIsIndir())
{
// If we have no information about the destination, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
if (!fgIsIndirOfAddrOfLocal(dest))
{
dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
tree->gtFlags |= GTF_GLOB_REF;
}
dest->SetIndirExceptionFlags(this);
tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT);
}
if (isCopyBlock)
{
if (srcVarDsc != nullptr)
{
// Handled above.
assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted);
if (!varTypeIsFloating(srcLclVarTree->TypeGet()) &&
size == genTypeSize(genActualType(srcLclVarTree->TypeGet())))
{
// Use the src local var directly.
src = srcLclVarTree;
}
else
{
// The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar))
// or indir(lclVarAddr) so it must be on the stack.
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum();
lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
GenTree* srcAddr;
if (src == srcLclVarTree)
{
srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
src = gtNewOperNode(GT_IND, asgType, srcAddr);
}
else
{
assert(src->OperIsIndir());
}
}
}
if (src->OperIsIndir())
{
if (!fgIsIndirOfAddrOfLocal(src))
{
// If we have no information about the src, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
}
src->SetIndirExceptionFlags(this);
}
}
else // InitBlk
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(asgType))
{
assert(!isCopyBlock); // Else we would have returned the tree above.
noway_assert(src->IsIntegralConst(0));
noway_assert(destVarDsc != nullptr);
src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size);
}
else
#endif
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
assert(src->IsCnsIntOrI());
// This will mutate the integer constant, in place, to be the correct
// value for the type we are using in the assignment.
src->AsIntCon()->FixupInitBlkValue(asgType);
}
}
// Ensure that the dest is setup appropriately.
if (dest->gtEffectiveVal()->OperIsIndir())
{
dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/);
}
// Ensure that the rhs is setup appropriately.
if (isCopyBlock)
{
src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/);
}
// Set the lhs and rhs on the assignment.
if (dest != tree->AsOp()->gtOp1)
{
asg->AsOp()->gtOp1 = dest;
}
if (src != asg->AsOp()->gtOp2)
{
asg->AsOp()->gtOp2 = src;
}
asg->ChangeType(asgType);
dest->gtFlags |= GTF_DONT_CSE;
asg->gtFlags &= ~GTF_EXCEPT;
asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT);
// Un-set GTF_REVERSE_OPS, and it will be set later if appropriate.
asg->gtFlags &= ~GTF_REVERSE_OPS;
#ifdef DEBUG
if (verbose)
{
printf("fgMorphOneAsgBlock (after):\n");
gtDispTree(tree);
}
#endif
return tree;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree
// to a tree of promoted field initialization assignments.
//
// Arguments:
// destLclNode - The destination LclVar node
// initVal - The initialization value
// blockSize - The amount of bytes to initialize
//
// Return Value:
// A tree that performs field by field initialization of the destination
// struct variable if various conditions are met, nullptr otherwise.
//
// Notes:
// This transforms a single block initialization assignment like:
//
// * ASG struct (init)
// +--* BLK(12) struct
// | \--* ADDR long
// | \--* LCL_VAR struct(P) V02 loc0
// | \--* int V02.a (offs=0x00) -> V06 tmp3
// | \--* ubyte V02.c (offs=0x04) -> V07 tmp4
// | \--* float V02.d (offs=0x08) -> V08 tmp5
// \--* INIT_VAL int
// \--* CNS_INT int 42
//
// into a COMMA tree of assignments that initialize each promoted struct
// field:
//
// * COMMA void
// +--* COMMA void
// | +--* ASG int
// | | +--* LCL_VAR int V06 tmp3
// | | \--* CNS_INT int 0x2A2A2A2A
// | \--* ASG ubyte
// | +--* LCL_VAR ubyte V07 tmp4
// | \--* CNS_INT int 42
// \--* ASG float
// +--* LCL_VAR float V08 tmp5
// \--* CNS_DBL float 1.5113661732714390e-13
//
GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize)
{
assert(destLclNode->OperIs(GT_LCL_VAR));
LclVarDsc* destLclVar = lvaGetDesc(destLclNode);
assert(varTypeIsStruct(destLclVar->TypeGet()));
assert(destLclVar->lvPromoted);
if (blockSize == 0)
{
JITDUMP(" size is zero or unknown.\n");
return nullptr;
}
if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles)
{
JITDUMP(" dest is address exposed and contains holes.\n");
return nullptr;
}
if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles)
{
// TODO-1stClassStructs: there are no reasons for this pessimization, delete it.
JITDUMP(" dest has custom layout and contains holes.\n");
return nullptr;
}
if (destLclVar->lvExactSize != blockSize)
{
JITDUMP(" dest size mismatch.\n");
return nullptr;
}
if (!initVal->OperIs(GT_CNS_INT))
{
JITDUMP(" source is not constant.\n");
return nullptr;
}
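// Block init fills memory bytewise, so replicate the low byte of the init value across
// all 8 bytes; the pattern is then sliced per field type below.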
const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL;
if (initPattern != 0)
{
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i);
if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet()))
{
// Cannot initialize GC or SIMD types with a non-zero constant.
// The former is completely bogus. The latter restriction could be
// lifted by supporting non-zero SIMD constants or by generating
// field initialization code that converts an integer constant to
// the appropriate SIMD value. Unlikely to be very useful, though.
JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n");
return nullptr;
}
}
}
JITDUMP(" using field by field initialization.\n");
GenTree* tree = nullptr;
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
unsigned fieldLclNum = destLclVar->lvFieldLclStart + i;
LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum);
GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet());
// If it had been labeled a "USEASG", assignments to the individual promoted fields are not.
dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG));
GenTree* src;
switch (dest->TypeGet())
{
case TYP_BOOL:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
// Promoted fields are expected to be "normalize on load". If that changes then
// we may need to adjust this code to widen the constant correctly.
assert(fieldDesc->lvNormalizeOnLoad());
FALLTHROUGH;
case TYP_INT:
{
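// Keep only as many bytes of the pattern as the field is wide.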
int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1;
src = gtNewIconNode(static_cast<int32_t>(initPattern & mask));
break;
}
case TYP_LONG:
src = gtNewLconNode(initPattern);
break;
case TYP_FLOAT:
float floatPattern;
memcpy(&floatPattern, &initPattern, sizeof(floatPattern));
src = gtNewDconNode(floatPattern, dest->TypeGet());
break;
case TYP_DOUBLE:
double doublePattern;
memcpy(&doublePattern, &initPattern, sizeof(doublePattern));
src = gtNewDconNode(doublePattern, dest->TypeGet());
break;
case TYP_REF:
case TYP_BYREF:
#ifdef FEATURE_SIMD
case TYP_SIMD8:
case TYP_SIMD12:
case TYP_SIMD16:
case TYP_SIMD32:
#endif // FEATURE_SIMD
assert(initPattern == 0);
src = gtNewIconNode(0, dest->TypeGet());
break;
default:
unreached();
}
GenTree* asg = gtNewAssignNode(dest, src);
if (optLocalAssertionProp)
{
optAssertionGen(asg);
}
if (tree != nullptr)
{
tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
}
else
{
tree = asg;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgMorphGetStructAddr: Gets the address of a struct object
//
// Arguments:
// pTree - the parent's pointer to the struct object node
// clsHnd - the class handle for the struct type
// isRValue - true if this is a source (not dest)
//
// Return Value:
// Returns the address of the struct value, possibly modifying the existing tree to
// sink the address below any comma nodes (this is to canonicalize for value numbering).
// If this is a source, it will morph it to an GT_IND before taking its address,
// since it may not be remorphed (and we don't want blk nodes as rvalues).
GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue)
{
GenTree* addr;
GenTree* tree = *pTree;
// If this is an indirection, we can return its address.
if (tree->OperIsIndir())
{
addr = tree->AsOp()->gtOp1;
}
else if (tree->gtOper == GT_COMMA)
{
// If this is a comma, we're going to "sink" the GT_ADDR below it.
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue);
tree->gtType = TYP_BYREF;
addr = tree;
}
else
{
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_INDEX:
case GT_FIELD:
case GT_ARR_ELEM:
addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
break;
case GT_INDEX_ADDR:
addr = tree;
break;
default:
{
// TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're
// not going to use "temp"
GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd);
unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum();
lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr));
addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue);
break;
}
}
}
*pTree = addr;
return addr;
}
//------------------------------------------------------------------------
// fgMorphBlockOperand: Canonicalize an operand of a block assignment
//
// Arguments:
// tree - The block operand
// asgType - The type of the assignment
// blockWidth - The size of the block
// isBlkReqd - true iff this operand must remain a block node
//
// Return Value:
// Returns the morphed block operand
//
// Notes:
// This does the following:
// - Ensures that a struct operand is a block node or lclVar.
// - Ensures that any COMMAs are above ADDR nodes.
// Although 'tree' WAS an operand of a block assignment, the assignment
// may have been retyped to be a scalar assignment.
GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd)
{
GenTree* effectiveVal = tree->gtEffectiveVal();
if (asgType != TYP_STRUCT)
{
if (effectiveVal->OperIsIndir())
{
if (!isBlkReqd)
{
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType))
{
effectiveVal = addr->gtGetOp1();
}
else if (effectiveVal->OperIsBlk())
{
effectiveVal->SetOper(GT_IND);
}
}
effectiveVal->gtType = asgType;
}
else if (effectiveVal->TypeGet() != asgType)
{
if (effectiveVal->IsCall())
{
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
else
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
effectiveVal = gtNewIndir(asgType, addr);
}
}
}
else
{
GenTreeIndir* indirTree = nullptr;
GenTreeLclVarCommon* lclNode = nullptr;
bool needsIndirection = true;
if (effectiveVal->OperIsIndir())
{
indirTree = effectiveVal->AsIndir();
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR))
{
lclNode = addr->gtGetOp1()->AsLclVarCommon();
}
}
else if (effectiveVal->OperGet() == GT_LCL_VAR)
{
lclNode = effectiveVal->AsLclVarCommon();
}
else if (effectiveVal->IsCall())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
#ifdef TARGET_ARM64
else if (effectiveVal->OperIsHWIntrinsic())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic();
assert(intrinsic->TypeGet() == TYP_STRUCT);
assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId()));
#endif
}
#endif // TARGET_ARM64
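// If the operand is a local (possibly under IND(ADDR(...))) whose size and type match
// the assignment, use the local directly and drop the indirection.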
if (lclNode != nullptr)
{
const LclVarDsc* varDsc = lvaGetDesc(lclNode);
if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType))
{
if (effectiveVal != lclNode)
{
JITDUMP("Replacing block node [%06d] with lclVar V%02u\n", dspTreeID(tree), lclNode->GetLclNum());
effectiveVal = lclNode;
}
needsIndirection = false;
}
else
{
// This may be a lclVar that was determined to be address-exposed.
effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT);
}
}
if (needsIndirection)
{
if (indirTree != nullptr)
{
// If we have an indirection and a block is required, it should already be a block.
assert(indirTree->OperIsBlk() || !isBlkReqd);
effectiveVal->gtType = asgType;
}
else
{
GenTree* newTree;
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
if (isBlkReqd)
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal);
if (clsHnd == NO_CLASS_HANDLE)
{
newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth));
}
else
{
newTree = gtNewObjNode(clsHnd, addr);
gtSetObjGcInfo(newTree->AsObj());
}
}
else
{
newTree = gtNewIndir(asgType, addr);
}
effectiveVal = newTree;
}
}
}
assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal)));
tree = effectiveVal;
return tree;
}
//------------------------------------------------------------------------
// fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields.
//
// Arguments:
// lclNum1 - a promoted lclVar that is used in fieldwise assignment;
// lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM.
//
// Return Value:
// True if the second local is valid and has the same struct handle as the first,
// false otherwise.
//
// Notes:
// This check is needed to avoid accessing LCL_VARs with incorrect
// CORINFO_FIELD_HANDLE that would confuse VN optimizations.
//
bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2)
{
assert(lclNum1 != BAD_VAR_NUM);
if (lclNum2 == BAD_VAR_NUM)
{
return false;
}
const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1);
const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2);
assert(varTypeIsStruct(varDsc1));
if (!varTypeIsStruct(varDsc2))
{
return false;
}
CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd();
CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd();
assert(struct1 != NO_CLASS_HANDLE);
assert(struct2 != NO_CLASS_HANDLE);
if (struct1 != struct2)
{
return false;
}
return true;
}
// Insert conversions and normalize the tree to make it amenable to register-based
// FP architectures.
GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
{
if (tree->OperIsArithmetic())
{
if (varTypeIsFloating(tree))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet()));
if (op1->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet());
}
if (op2->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet());
}
}
}
else if (tree->OperIsCompare())
{
GenTree* op1 = tree->AsOp()->gtOp1;
if (varTypeIsFloating(op1))
{
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op2));
if (op1->TypeGet() != op2->TypeGet())
{
                // both had better be floating, just one bigger than the other
if (op1->TypeGet() == TYP_FLOAT)
{
assert(op2->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_FLOAT)
{
assert(op1->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
}
}
}
return tree;
}
#ifdef FEATURE_SIMD
//--------------------------------------------------------------------------------------------------------------
// getSIMDStructFromField:
//   Check whether the field belongs to a simd struct. If it does, return the GenTree* for
//   the struct node, along with the base type, field index and simd size. Otherwise, return nullptr.
//   Usually, if the tree node is from a simd lclvar which is not used in any SIMD intrinsic, we
//   should return nullptr, since in that case the SIMD struct should be treated as a regular struct.
//   However, if the caller wants the simd struct node regardless, it can set ignoreUsedInSIMDIntrinsic
//   to true. The IsUsedInSIMDIntrinsic check is then skipped, and the SIMD struct node is returned
//   whenever the struct is a SIMD struct.
//
// Arguments:
// tree - GentreePtr. This node will be checked to see this is a field which belongs to a simd
// struct used for simd intrinsic or not.
// simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut
// to simd lclvar's base JIT type.
// indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut
// equals to the index number of this field.
// simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut
// equals to the simd struct size which this tree belongs to.
// ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
// the UsedInSIMDIntrinsic check.
//
// return value:
//    A GenTree* which points to the simd lclvar that the field belongs to. If the tree is not a
//    SIMD-intrinsic-related field, return nullptr.
//
GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic /*false*/)
{
GenTree* ret = nullptr;
if (tree->OperGet() == GT_FIELD)
{
GenTree* objRef = tree->AsField()->GetFldObj();
if (objRef != nullptr)
{
GenTree* obj = nullptr;
if (objRef->gtOper == GT_ADDR)
{
obj = objRef->AsOp()->gtOp1;
}
else if (ignoreUsedInSIMDIntrinsic)
{
obj = objRef;
}
else
{
return nullptr;
}
if (isSIMDTypeLocal(obj))
{
LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon());
if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
{
*simdSizeOut = varDsc->lvExactSize;
*simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
ret = obj;
}
}
else if (obj->OperGet() == GT_SIMD)
{
ret = obj;
GenTreeSIMD* simdNode = obj->AsSIMD();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#ifdef FEATURE_HW_INTRINSICS
else if (obj->OperIsHWIntrinsic())
{
ret = obj;
GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#endif // FEATURE_HW_INTRINSICS
}
}
if (ret != nullptr)
{
var_types fieldType = tree->TypeGet();
if (fieldType == TYP_LONG)
{
// Vector2/3/4 expose public float fields while Vector<T>
// and Vector64/128/256<T> have internal ulong fields. So
// we should only ever encounter accesses for TYP_FLOAT or
// TYP_LONG and in the case of the latter we don't want the
// generic type since we are executing some algorithm on the
// raw underlying bits instead.
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG;
}
else
{
assert(fieldType == TYP_FLOAT);
}
unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut));
*indexOut = tree->AsField()->gtFldOffset / baseTypeSize;
}
return ret;
}
/*****************************************************************************
 * If a read operation tries to access a simd struct field, then transform the operation
* to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement.
* Return:
* A GenTree* which points to the new tree. If the tree is not for simd intrinsic,
* return nullptr.
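 *
 * Example (illustrative): reading the Y field of a Vector4 local that is used in SIMD
 * intrinsics becomes NI_Vector128_GetElement(vec, 1), since Y is the float at offset 4.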
*/
GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree)
{
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
GenTree* op2 = gtNewIconNode(index, TYP_INT);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
return tree;
}
break;
}
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE2))
{
return tree;
}
break;
}
default:
{
unreached();
}
}
#elif defined(TARGET_ARM64)
if (!compOpportunisticallyDependsOn(InstructionSet_AdvSimd))
{
return tree;
}
#endif // !TARGET_XARCH && !TARGET_ARM64
tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
return tree;
}
/*****************************************************************************
* Transform an assignment of a SIMD struct field to SimdWithElementNode, and
* return a new tree. If it is not such an assignment, then return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic set.
* Return:
* A GenTree* which points to the new tree. If the tree is not for simd intrinsic,
* return nullptr.
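 *
 * Example (illustrative): "vec.Y = f", where vec is a Vector4 local used in SIMD intrinsics,
 * becomes "vec = NI_Vector128_WithElement(vec, 1, f)".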
*/
GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree)
{
assert(tree->OperGet() == GT_ASG);
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdType = simdStructNode->gtType;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
GenTree* op2 = gtNewIconNode(index, TYP_INT);
GenTree* op3 = tree->gtGetOp2();
NamedIntrinsic intrinsicId = NI_Vector128_WithElement;
GenTree* target = gtClone(simdStructNode);
assert(target != nullptr);
GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
tree->AsOp()->gtOp1 = target;
tree->AsOp()->gtOp2 = simdTree;
// fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source
// and target have not yet been morphed.
// Therefore, in case the source and/or target are now implicit byrefs, we need to call it again.
if (fgMorphImplicitByRefArgs(tree))
{
if (tree->gtGetOp1()->OperIsBlk())
{
assert(tree->gtGetOp1()->TypeGet() == simdType);
tree->gtGetOp1()->SetOper(GT_IND);
tree->gtGetOp1()->gtType = simdType;
}
}
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
return tree;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------------
// fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3"
// for commutative operators.
//
// Arguments:
// tree - node to fold
//
// return value:
// A folded GenTree* instance or nullptr if something prevents folding.
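//       e.g. "(x + 4) + 8" is folded to "x + 12".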
//
GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree)
{
assert(varTypeIsIntegralOrI(tree->TypeGet()));
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR));
// op1 can be GT_COMMA, in this case we're going to fold
// "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))"
GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true);
genTreeOps oper = tree->OperGet();
if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() ||
op1->gtGetOp1()->IsCnsIntOrI())
{
return nullptr;
}
if (!fgGlobalMorph && (op1 != tree->gtGetOp1()))
{
// Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1)))
// don't run the optimization for such trees outside of global morph.
// Otherwise, there is a chance of violating VNs invariants and/or modifying a tree
// that is an active CSE candidate.
return nullptr;
}
if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1))
{
// The optimization removes 'tree' from IR and changes the value of 'op1'.
return nullptr;
}
if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow()))
{
return nullptr;
}
GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon();
GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon();
if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet()))
{
return nullptr;
}
if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2))
{
// The optimization removes 'cns2' from IR and changes the value of 'cns1'.
return nullptr;
}
GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2));
if (!folded->IsCnsIntOrI())
{
// Give up if we can't fold "C1 op C2"
return nullptr;
}
auto foldedCns = folded->AsIntCon();
cns1->SetIconValue(foldedCns->IconValue());
cns1->SetVNsFromNode(foldedCns);
cns1->gtFieldSeq = foldedCns->gtFieldSeq;
op1 = tree->gtGetOp1();
op1->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(cns2);
DEBUG_DESTROY_NODE(foldedCns);
INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1->AsOp();
}
//------------------------------------------------------------------------------
// fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)".
//
// Arguments:
// tree - node to fold
//
// Return Value:
// A folded GenTree* instance, or nullptr if it couldn't be folded
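//
//    For example, "(long)x | (long)y", where both casts are identical unchecked casts from int,
//    can become "(long)(x | y)".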
GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree)
{
// This transform does not preserve VNs and deletes a node.
assert(fgGlobalMorph);
assert(varTypeIsIntegralOrI(tree));
assert(tree->OperIs(GT_OR, GT_AND, GT_XOR));
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
genTreeOps oper = tree->OperGet();
// see whether both ops are casts, with matching to and from types.
if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST))
{
// bail if either operand is a checked cast
if (op1->gtOverflow() || op2->gtOverflow())
{
return nullptr;
}
var_types fromType = op1->AsCast()->CastOp()->TypeGet();
var_types toType = op1->AsCast()->CastToType();
bool isUnsigned = op1->IsUnsigned();
if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) ||
(op2->IsUnsigned() != isUnsigned))
{
return nullptr;
}
/*
// Reuse gentree nodes:
//
// tree op1
// / \ |
// op1 op2 ==> tree
// | | / \.
// x y x y
//
// (op2 becomes garbage)
*/
tree->gtOp1 = op1->AsCast()->CastOp();
tree->gtOp2 = op2->AsCast()->CastOp();
tree->gtType = genActualType(fromType);
op1->gtType = genActualType(toType);
op1->AsCast()->gtOp1 = tree;
op1->AsCast()->CastToType() = toType;
op1->SetAllEffectsFlags(tree);
// no need to update isUnsigned
DEBUG_DESTROY_NODE(op2);
INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1;
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_SMPOP tree for code generation.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
{
ALLOCA_CHECK();
assert(tree->OperKind() & GTK_SMPOP);
/* The steps in this function are :
o Perform required preorder processing
o Process the first, then second operand, if any
o Perform required postorder morphing
o Perform optional postorder morphing if optimizing
*/
bool isQmarkColon = false;
AssertionIndex origAssertionCount = DUMMY_INIT(0);
AssertionDsc* origAssertionTab = DUMMY_INIT(NULL);
AssertionIndex thenAssertionCount = DUMMY_INIT(0);
AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL);
if (fgGlobalMorph)
{
tree = fgMorphForRegisterFP(tree);
}
genTreeOps oper = tree->OperGet();
var_types typ = tree->TypeGet();
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
/*-------------------------------------------------------------------------
* First do any PRE-ORDER processing
*/
switch (oper)
{
// Some arithmetic operators need to use a helper call to the EE
int helper;
case GT_ASG:
tree = fgDoNormalizeOnStore(tree);
/* fgDoNormalizeOnStore can change op2 */
noway_assert(op1 == tree->AsOp()->gtOp1);
op2 = tree->AsOp()->gtOp2;
#ifdef FEATURE_SIMD
if (IsBaselineSimdIsaSupported())
{
// We should check whether op2 should be assigned to a SIMD field or not.
                // If it is, we should translate the tree to a simd intrinsic.
assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree);
typ = tree->TypeGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
#ifdef DEBUG
assert((tree == newTree) && (tree->OperGet() == oper));
if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
{
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
}
#endif // DEBUG
}
#endif
// We can't CSE the LHS of an assignment. Only r-values can be CSEed.
// Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
            // behavior, allow CSE'ing if it is a struct type (or a TYP_REF transformed from a struct type)
// TODO-1stClassStructs: improve this.
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_ADDR:
/* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
op1->gtFlags |= GTF_DONT_CSE;
break;
case GT_QMARK:
case GT_JTRUE:
noway_assert(op1);
if (op1->OperIsCompare())
{
/* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
not need to materialize the result as a 0 or 1. */
/* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
else
{
GenTree* effOp1 = op1->gtEffectiveVal();
noway_assert((effOp1->gtOper == GT_CNS_INT) &&
(effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
}
break;
case GT_COLON:
if (optLocalAssertionProp)
{
isQmarkColon = true;
}
break;
case GT_FIELD:
return fgMorphField(tree, mac);
case GT_INDEX:
return fgMorphArrayIndex(tree);
case GT_CAST:
{
GenTree* morphedCast = fgMorphExpandCast(tree->AsCast());
if (morphedCast != nullptr)
{
return morphedCast;
}
op1 = tree->AsCast()->CastOp();
}
break;
case GT_MUL:
noway_assert(op2 != nullptr);
if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow())
{
// MUL(NEG(a), C) => MUL(a, NEG(C))
if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() &&
!op2->IsIconHandle())
{
GenTree* newOp1 = op1->gtGetOp1();
GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet());
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
tree->AsOp()->gtOp1 = newOp1;
tree->AsOp()->gtOp2 = newConst;
return fgMorphSmpOp(tree, mac);
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
                // For (long)int1 * (long)int2, we don't actually do the
// casts, and just multiply the 32 bit values, which will
// give us the 64 bit result in edx:eax.
if (tree->Is64RsltMul())
{
// We are seeing this node again.
// Morph only the children of casts,
// so as to avoid losing them.
tree = fgMorphLongMul(tree->AsOp());
goto DONE_MORPHING_CHILDREN;
}
tree = fgRecognizeAndMorphLongMul(tree->AsOp());
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->AsOp()->gtGetOp2();
if (tree->Is64RsltMul())
{
goto DONE_MORPHING_CHILDREN;
}
else
{
if (tree->gtOverflow())
helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
else
helper = CORINFO_HELP_LMUL;
goto USE_HELPER_FOR_ARITH;
}
}
#endif // !TARGET_64BIT
break;
case GT_ARR_LENGTH:
if (op1->OperIs(GT_CNS_STR))
{
// Optimize `ldstr + String::get_Length()` to CNS_INT
// e.g. "Hello".Length => 5
GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon());
if (iconNode != nullptr)
{
INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return iconNode;
}
}
break;
case GT_DIV:
// Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two.
// Powers of two within range are always exactly represented,
// so multiplication by the reciprocal is safe in this scenario
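            // e.g. "x / 8.0" becomes "x * 0.125".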
if (fgGlobalMorph && op2->IsCnsFltOrDbl())
{
double divisor = op2->AsDblCon()->gtDconVal;
if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
{
oper = GT_MUL;
tree->ChangeOper(oper);
op2->AsDblCon()->gtDconVal = 1.0 / divisor;
}
}
            // Convert DIV to UDIV if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_DIV));
tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_LDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_DIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // !TARGET_64BIT
break;
case GT_UDIV:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_ULDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_UDIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // TARGET_64BIT
break;
case GT_MOD:
if (varTypeIsFloating(typ))
{
helper = CORINFO_HELP_DBLREM;
noway_assert(op2);
if (op1->TypeGet() == TYP_FLOAT)
{
if (op2->TypeGet() == TYP_FLOAT)
{
helper = CORINFO_HELP_FLTREM;
}
else
{
tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
else if (op2->TypeGet() == TYP_FLOAT)
{
tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
goto USE_HELPER_FOR_ARITH;
}
            // Convert MOD to UMOD if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_MOD));
tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
// Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
// A similar optimization for signed mod will not work for a negative perfectly divisible
// HI-word. To make it correct, we would need to divide without the sign and then flip the
            // result sign after mod. This requires 18 opcodes + flow, making it not worth inlining.
goto ASSIGN_HELPER_FOR_MOD;
case GT_UMOD:
#ifdef TARGET_ARMARCH
//
// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
//
#else // TARGET_XARCH
// If this is an unsigned long mod with a constant divisor,
// then don't morph to a helper call - it can be done faster inline using idiv.
noway_assert(op2);
if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
{
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
noway_assert(op1->TypeIs(TYP_LONG));
// Update flags for op1 morph.
tree->gtFlags &= ~GTF_ALL_EFFECT;
// Only update with op1 as op2 is a constant.
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// If op1 is a constant, then do constant folding of the division operator.
if (op1->OperIs(GT_CNS_NATIVELONG))
{
tree = gtFoldExpr(tree);
}
if (!tree->OperIsConst())
{
tree->AsOp()->CheckDivideByConstOptimized(this);
}
return tree;
}
}
#endif // TARGET_XARCH
ASSIGN_HELPER_FOR_MOD:
// For "val % 1", return 0 if op1 doesn't have any side effects
// and we are not in the CSE phase, we cannot discard 'tree'
// because it may contain CSE expressions that we haven't yet examined.
//
if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
{
if (op2->IsIntegralConst(1))
{
GenTree* zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
DEBUG_DESTROY_NODE(tree);
return zeroNode;
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
if (oper == GT_UMOD)
{
helper = CORINFO_HELP_UMOD;
goto USE_HELPER_FOR_ARITH;
}
else if (oper == GT_MOD)
{
helper = CORINFO_HELP_MOD;
goto USE_HELPER_FOR_ARITH;
}
}
#endif
#endif // !TARGET_64BIT
if (!optValnumCSE_phase)
{
#ifdef TARGET_ARM64
if (tree->OperIs(GT_UMOD) && op2->IsIntegralConstUnsignedPow2())
{
// Transformation: a % b = a & (b - 1);
tree = fgMorphUModToAndSub(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
// ARM64 architecture manual suggests this transformation
// for the mod operator.
else
#else
// XARCH only applies this transformation if we know
// that magic division will be used - which is determined
// when 'b' is not a power of 2 constant and mod operator is signed.
// Lowering for XARCH does this optimization already,
// but is also done here to take advantage of CSE.
if (tree->OperIs(GT_MOD) && op2->IsIntegralConst() && !op2->IsIntegralConstAbsPow2())
#endif
{
// Transformation: a % b = a - (a / b) * b;
tree = fgMorphModToSubMulDiv(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
}
break;
USE_HELPER_FOR_ARITH:
{
// TODO: this comment is wrong now, do an appropriate fix.
/* We have to morph these arithmetic operations into helper calls
before morphing the arguments (preorder), else the arguments
won't get correct values of fgPtrArgCntCur.
However, try to fold the tree first in case we end up with a
simple node which won't need a helper call at all */
noway_assert(tree->OperIsBinary());
GenTree* oldTree = tree;
tree = gtFoldExpr(tree);
// Were we able to fold it ?
// Note that gtFoldExpr may return a non-leaf even if successful
// e.g. for something like "expr / 1" - see also bug #290853
if (tree->OperIsLeaf() || (oldTree != tree))
{
return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
}
// Did we fold it into a comma node with throw?
if (tree->gtOper == GT_COMMA)
{
noway_assert(fgIsCommaThrow(tree));
return fgMorphTree(tree);
}
}
return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2));
case GT_RETURN:
if (!tree->TypeIs(TYP_VOID))
{
if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND))
{
op1 = fgMorphRetInd(tree->AsUnOp());
}
if (op1->OperIs(GT_LCL_VAR))
{
// With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)`
                    // and `ASG` will be transformed into a field by field copy without parent local referencing if
// possible.
GenTreeLclVar* lclVar = op1->AsLclVar();
unsigned lclNum = lclVar->GetLclNum();
if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum))
{
LclVarDsc* varDsc = lvaGetDesc(lclVar);
if (varDsc->CanBeReplacedWithItsField(this))
{
// We can replace the struct with its only field and allow copy propagation to replace
// return value that was written as a field.
unsigned fieldLclNum = varDsc->lvFieldLclStart;
LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum);
JITDUMP("Replacing an independently promoted local var V%02u with its only field "
"V%02u for "
"the return [%06u]\n",
lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree));
lclVar->SetLclNum(fieldLclNum);
lclVar->ChangeType(fieldDsc->lvType);
}
}
}
}
// normalize small integer return values
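            // e.g. a method with a TYP_UBYTE return type gets its return expression wrapped in CAST(int <- ubyte).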
if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) &&
fgCastNeeded(op1, info.compRetType))
{
// Small-typed return values are normalized by the callee
op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType);
// Propagate GTF_COLON_COND
op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
tree->AsOp()->gtOp1 = fgMorphTree(op1);
// Propagate side effect flags
tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1());
return tree;
}
break;
case GT_EQ:
case GT_NE:
{
GenTree* optimizedTree = gtFoldTypeCompare(tree);
if (optimizedTree != tree)
{
return fgMorphTree(optimizedTree);
}
// Pattern-matching optimization:
// (a % c) ==/!= 0
// for power-of-2 constant `c`
// =>
// a & (c - 1) ==/!= 0
// For integer `a`, even if negative.
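            // e.g. "(a % 8) == 0" becomes "(a & 7) == 0".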
if (opts.OptimizationEnabled() && !optValnumCSE_phase)
{
assert(tree->OperIs(GT_EQ, GT_NE));
if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0))
{
GenTree* op1op2 = op1->AsOp()->gtOp2;
if (op1op2->IsCnsIntOrI())
{
const ssize_t modValue = op1op2->AsIntCon()->IconValue();
if (isPow2(modValue))
{
JITDUMP("\nTransforming:\n");
DISPTREE(tree);
op1->SetOper(GT_AND); // Change % => &
op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1
fgUpdateConstTreeValueNumber(op1op2);
JITDUMP("\ninto:\n");
DISPTREE(tree);
}
}
}
}
}
FALLTHROUGH;
case GT_GT:
{
// Try and optimize nullable boxes feeding compares
GenTree* optimizedTree = gtFoldBoxNullable(tree);
if (optimizedTree->OperGet() != tree->OperGet())
{
return optimizedTree;
}
else
{
tree = optimizedTree;
}
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
break;
}
case GT_RUNTIMELOOKUP:
return fgMorphTree(op1);
#ifdef TARGET_ARM
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round)
{
switch (tree->TypeGet())
{
case TYP_DOUBLE:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1));
case TYP_FLOAT:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1));
default:
unreached();
}
}
break;
#endif
case GT_PUTARG_TYPE:
return fgMorphTree(tree->AsUnOp()->gtGetOp1());
case GT_NULLCHECK:
{
op1 = tree->AsUnOp()->gtGetOp1();
if (op1->IsCall())
{
GenTreeCall* const call = op1->AsCall();
if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd)))
{
JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call));
// TODO: Can we also remove the call?
//
return fgMorphTree(call);
}
}
}
break;
default:
break;
}
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
GenTree* morphed = fgMorphReduceAddOps(tree);
if (morphed != tree)
return fgMorphTree(morphed);
}
/*-------------------------------------------------------------------------
* Process the first operand, if any
*/
if (op1)
{
// If we are entering the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can restore this state when entering the "else" part
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
origAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
origAssertionCount = optAssertionCount;
memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
origAssertionCount = 0;
origAssertionTab = nullptr;
}
}
// We might need a new MorphAddressContext context. (These are used to convey
// parent context about how addresses being calculated will be used; see the
// specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
MorphAddrContext subIndMac1(MACK_Ind);
MorphAddrContext* subMac1 = mac;
if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind)
{
switch (tree->gtOper)
{
case GT_ADDR:
// A non-null mac here implies this node is part of an address computation.
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
subMac1->m_kind = MACK_Addr;
}
break;
case GT_COMMA:
// In a comma, the incoming context only applies to the rightmost arg of the
// comma list. The left arg (op1) gets a fresh context.
subMac1 = nullptr;
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
// A non-null mac here implies this node is part of an address computation (the tree parent is
// GT_ADDR).
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
}
break;
default:
break;
}
}
// For additions, if we're in an IND context keep track of whether
// all offsets added to the address are constant, and their sum.
if (tree->gtOper == GT_ADD && subMac1 != nullptr)
{
assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
GenTree* otherOp = tree->AsOp()->gtOp2;
            // Is the other operand a constant?
if (otherOp->IsCnsIntOrI())
{
ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
totalOffset += otherOp->AsIntConCommon()->IconValue();
if (totalOffset.IsOverflow())
{
// We will consider an offset so large as to overflow as "not a constant" --
// we will do a null check.
subMac1->m_allConstantOffsets = false;
}
else
{
subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
}
else
{
subMac1->m_allConstantOffsets = false;
}
}
// If op1 is a GT_FIELD or indir, we need to pass down the mac if
// its parent is GT_ADDR, since the address of op1
// is part of an ongoing address computation. Otherwise
// op1 represents the value of the field and so any address
// calculations it does are in a new context.
if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR))
{
subMac1 = nullptr;
// The impact of op1's value to any ongoing
// address computation is handled below when looking
// at op2.
}
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1);
// If we are exiting the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can merge this state with the "else" part exit
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
thenAssertionCount = optAssertionCount;
memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
thenAssertionCount = 0;
thenAssertionTab = nullptr;
}
}
/* Morphing along with folding and inlining may have changed the
* side effect flags, so we have to reset them
*
* NOTE: Don't reset the exception flags on nodes that may throw */
assert(tree->gtOper != GT_CALL);
if (!tree->OperRequiresCallFlag(this))
{
tree->gtFlags &= ~GTF_CALL;
}
/* Propagate the new flags */
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
        // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
// Similarly for clsVar
if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
{
tree->gtFlags &= ~GTF_GLOB_REF;
}
} // if (op1)
/*-------------------------------------------------------------------------
* Process the second operand, if any
*/
if (op2)
{
// If we are entering the "else" part of a Qmark-Colon we must
// reset the state of the current copy assignment table
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
optAssertionReset(0);
if (origAssertionCount)
{
size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
optAssertionReset(origAssertionCount);
}
}
// We might need a new MorphAddressContext context to use in evaluating op2.
// (These are used to convey parent context about how addresses being calculated
// will be used; see the specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
switch (tree->gtOper)
{
case GT_ADD:
if (mac != nullptr && mac->m_kind == MACK_Ind)
{
GenTree* otherOp = tree->AsOp()->gtOp1;
                    // Is the other operand a constant?
if (otherOp->IsCnsIntOrI())
{
mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
else
{
mac->m_allConstantOffsets = false;
}
}
break;
default:
break;
}
// If op2 is a GT_FIELD or indir, we must be taking its value,
// so it should evaluate its address in a new context.
if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir())
{
// The impact of op2's value to any ongoing
// address computation is handled above when looking
// at op1.
mac = nullptr;
}
tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac);
/* Propagate the side effect flags from op2 */
tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
// If we are exiting the "else" part of a Qmark-Colon we must
// merge the state of the current copy assignment table with
// that of the exit of the "then" part.
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
// If either exit table has zero entries then
// the merged table also has zero entries
if (optAssertionCount == 0 || thenAssertionCount == 0)
{
optAssertionReset(0);
}
else
{
size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
if ((optAssertionCount != thenAssertionCount) ||
(memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0))
{
// Yes they are different so we have to find the merged set
// Iterate over the copy asgn table removing any entries
// that do not have an exact match in the thenAssertionTab
AssertionIndex index = 1;
while (index <= optAssertionCount)
{
AssertionDsc* curAssertion = optGetAssertion(index);
for (unsigned j = 0; j < thenAssertionCount; j++)
{
AssertionDsc* thenAssertion = &thenAssertionTab[j];
// Do the left sides match?
if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
(curAssertion->assertionKind == thenAssertion->assertionKind))
{
// Do the right sides match?
if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
(curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
{
goto KEEP;
}
else
{
goto REMOVE;
}
}
}
//
// If we fall out of the loop above then we didn't find
// any matching entry in the thenAssertionTab so it must
// have been killed on that path so we remove it here
//
REMOVE:
// The data at optAssertionTabPrivate[i] is to be removed
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
{
printf("The QMARK-COLON ");
printTreeID(tree);
printf(" removes assertion candidate #%d\n", index);
}
#endif
optAssertionRemove(index);
continue;
KEEP:
// The data at optAssertionTabPrivate[i] is to be kept
index++;
}
}
}
}
} // if (op2)
#ifndef TARGET_64BIT
DONE_MORPHING_CHILDREN:
#endif // !TARGET_64BIT
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
else
{
if (tree->OperMayThrow(this))
{
// Mark the tree node as potentially throwing an exception
tree->gtFlags |= GTF_EXCEPT;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
{
tree->gtFlags &= ~GTF_EXCEPT;
}
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
{
tree->gtFlags &= ~GTF_ASG;
}
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0)))
{
tree->gtFlags &= ~GTF_CALL;
}
}
/*-------------------------------------------------------------------------
* Now do POST-ORDER processing
*/
if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet())))
{
// The tree is really not GC but was marked as such. Now that the
// children have been unmarked, unmark the tree too.
        // Remember that GT_COMMA inherits its type only from op2
if (tree->gtOper == GT_COMMA)
{
tree->gtType = genActualType(op2->TypeGet());
}
else
{
tree->gtType = genActualType(op1->TypeGet());
}
}
GenTree* oldTree = tree;
GenTree* qmarkOp1 = nullptr;
GenTree* qmarkOp2 = nullptr;
if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON))
{
qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1;
qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2;
}
// Try to fold it, maybe we get lucky,
tree = gtFoldExpr(tree);
if (oldTree != tree)
{
/* if gtFoldExpr returned op1 or op2 then we are done */
if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
{
return tree;
}
/* If we created a comma-throw tree then we need to morph op1 */
if (fgIsCommaThrow(tree))
{
tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
else if (tree->OperIsConst())
{
return tree;
}
/* gtFoldExpr could have used setOper to change the oper */
oper = tree->OperGet();
typ = tree->TypeGet();
/* gtFoldExpr could have changed op1 and op2 */
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
// Do we have an integer compare operation?
//
if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
{
// Are we comparing against zero?
//
if (op2->IsIntegralConst(0))
{
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
}
/*-------------------------------------------------------------------------
* Perform the required oper-specific postorder morphing
*/
GenTree* temp;
size_t ival1;
GenTree* lclVarTree;
GenTree* effectiveOp1;
FieldSeqNode* fieldSeq = nullptr;
switch (oper)
{
case GT_ASG:
if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0))
{
op1->gtFlags &= ~GTF_VAR_FOLDED_IND;
tree = fgDoNormalizeOnStore(tree);
op2 = tree->gtGetOp2();
}
lclVarTree = fgIsIndirOfAddrOfLocal(op1);
if (lclVarTree != nullptr)
{
lclVarTree->gtFlags |= GTF_VAR_DEF;
}
effectiveOp1 = op1->gtEffectiveVal();
// If we are storing a small type, we might be able to omit a cast.
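            // e.g. "IND<ubyte>(addr) = CAST<ubyte>(x)" can drop the cast and store "x" directly,
            // since the small-typed store truncates anyway.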
if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1))
{
if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) &&
varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow())
{
var_types castType = op2->CastToType();
// If we are performing a narrowing cast and
// castType is larger or the same as op1's type
// then we can discard the cast.
if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1)))
{
tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp();
}
}
}
fgAssignSetVarDef(tree);
/* We can't CSE the LHS of an assignment */
            /* We also must set this in the pre-morphing phase, otherwise assertionProp doesn't see it */
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_CAST:
tree = fgOptimizeCast(tree->AsCast());
if (!tree->OperIsSimple())
{
return tree;
}
if (tree->OperIs(GT_CAST) && tree->gtOverflow())
{
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_EQ:
case GT_NE:
// It is not safe to reorder/delete CSE's
if (!optValnumCSE_phase && op2->IsIntegralConst())
{
tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp());
assert(tree->OperIsCompare());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
goto COMPARE;
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)))
{
tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
// op2's value may be changed, so it cannot be a CSE candidate.
if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2))
{
tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp());
oper = tree->OperGet();
assert(op1 == tree->AsOp()->gtGetOp1());
assert(op2 == tree->AsOp()->gtGetOp2());
}
COMPARE:
noway_assert(tree->OperIsCompare());
break;
case GT_MUL:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
// This must be GTF_MUL_64RSLT
INDEBUG(tree->AsOp()->DebugCheckLongMul());
return tree;
}
#endif // TARGET_64BIT
goto CM_OVF_OP;
case GT_SUB:
if (tree->gtOverflow())
{
goto CM_OVF_OP;
}
// TODO #4104: there are a lot of other places where
// this condition is not checked before transformations.
if (fgGlobalMorph)
{
/* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
noway_assert(op2);
if (op2->IsCnsIntOrI() && !op2->IsIconHandle())
{
// Negate the constant and change the node to be "+",
// except when `op2` is a const byref.
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField();
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
noway_assert(op1);
if (op1->IsCnsIntOrI())
{
noway_assert(varTypeIsIntOrI(tree));
// The type of the new GT_NEG node cannot just be op2->TypeGet().
// Otherwise we may sign-extend incorrectly in cases where the GT_NEG
// node ends up feeding directly into a cast, for example in
// GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte))
tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2);
fgMorphTreeDone(op2);
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* No match - exit */
}
            // Skip the optimization below if the non-NEG operand is a constant.
            // Neither op1 nor op2 is a constant here, because that was already checked above.
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
// a - -b = > a + b
// SUB(a, (NEG(b)) => ADD(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
// tree: SUB
// op1: a
// op2: NEG
// op2Child: b
GenTree* op2Child = op2->AsOp()->gtOp1; // b
oper = GT_ADD;
tree->SetOper(oper, GenTree::PRESERVE_VN);
tree->AsOp()->gtOp2 = op2Child;
DEBUG_DESTROY_NODE(op2);
op2 = op2Child;
}
// -a - -b = > b - a
// SUB(NEG(a), (NEG(b)) => SUB(b, a)
else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2))
{
// tree: SUB
// op1: NEG
// op1Child: a
// op2: NEG
// op2Child: b
GenTree* op1Child = op1->AsOp()->gtOp1; // a
GenTree* op2Child = op2->AsOp()->gtOp1; // b
tree->AsOp()->gtOp1 = op2Child;
tree->AsOp()->gtOp2 = op1Child;
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
op1 = op2Child;
op2 = op1Child;
}
}
break;
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_DIV:
#ifdef TARGET_LOONGARCH64
case GT_MOD:
#endif
if (!varTypeIsFloating(tree->gtType))
{
// Codegen for this instruction needs to be able to throw two exceptions:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
}
break;
case GT_UDIV:
#ifdef TARGET_LOONGARCH64
case GT_UMOD:
#endif
// Codegen for this instruction needs to be able to throw one exception:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
break;
#endif // defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_ADD:
CM_OVF_OP:
if (tree->gtOverflow())
{
tree->gtRequestSetFlags();
                // Add the exception-throwing basic block to jump to on overflow
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
// We can't do any commutative morphing for overflow instructions
break;
}
CM_ADD_OP:
FALLTHROUGH;
case GT_OR:
case GT_XOR:
case GT_AND:
tree = fgOptimizeCommutativeArithmetic(tree->AsOp());
if (!tree->OperIsSimple())
{
return tree;
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_NOT:
case GT_NEG:
// Remove double negation/not.
            // Note: this is not a safe transformation if "tree" is a CSE candidate.
// Consider for example the following expression: NEG(NEG(OP)), where any
// NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find
// the original NEG in the statement.
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) &&
!gtIsActiveCSE_Candidate(op1))
{
JITDUMP("Remove double negation/not\n")
GenTree* op1op1 = op1->gtGetOp1();
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op1op1;
}
// Distribute negation over simple multiplication/division expressions
if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) &&
op1->OperIs(GT_MUL, GT_DIV))
{
GenTreeOp* mulOrDiv = op1->AsOp();
GenTree* op1op1 = mulOrDiv->gtGetOp1();
GenTree* op1op2 = mulOrDiv->gtGetOp2();
if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle())
{
// NEG(MUL(a, C)) => MUL(a, -C)
// NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1}
ssize_t constVal = op1op2->AsIntCon()->IconValue();
if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) ||
(mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow()))
{
GenTree* newOp1 = op1op1; // a
GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C
mulOrDiv->gtOp1 = newOp1;
mulOrDiv->gtOp2 = newOp2;
mulOrDiv->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1op2);
return mulOrDiv;
}
}
}
/* Any constant cases should have been folded earlier */
noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
break;
case GT_CKFINITE:
noway_assert(varTypeIsFloating(op1->TypeGet()));
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN);
break;
case GT_BOUNDS_CHECK:
fgSetRngChkTarget(tree);
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
{
// If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
// the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
// is a local or CLS_VAR, even if it has been address-exposed.
if (op1->OperIs(GT_ADDR))
{
tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF);
}
if (!tree->OperIs(GT_IND))
{
break;
}
// Can not remove a GT_IND if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
bool foldAndReturnTemp = false;
temp = nullptr;
ival1 = 0;
// Don't remove a volatile GT_IND, even if the address points to a local variable.
if ((tree->gtFlags & GTF_IND_VOLATILE) == 0)
{
/* Try to Fold *(&X) into X */
if (op1->gtOper == GT_ADDR)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
temp = op1->AsOp()->gtOp1; // X
// In the test below, if they're both TYP_STRUCT, this of course does *not* mean that
// they are the *same* struct type. In fact, they almost certainly aren't. If the
// address has an associated field sequence, that identifies this case; go through
// the "lcl_fld" path rather than this one.
FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below.
if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
{
foldAndReturnTemp = true;
}
else if (temp->OperIsLocal())
{
unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
                        // We will try to optimize when we have a struct promoted with a zero lvFldOffset
if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
{
noway_assert(varTypeIsStruct(varDsc));
// We will try to optimize when we have a single field struct that is being struct promoted
if (varDsc->lvFieldCnt == 1)
{
unsigned lclNumFld = varDsc->lvFieldLclStart;
// just grab the promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);
                                // Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
// is zero
if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
{
// We can just use the existing promoted field LclNum
temp->AsLclVarCommon()->SetLclNum(lclNumFld);
temp->gtType = fieldVarDsc->TypeGet();
foldAndReturnTemp = true;
}
}
}
// If the type of the IND (typ) is a "small int", and the type of the local has the
// same width, then we can reduce to just the local variable -- it will be
// correctly normalized.
//
// The below transformation cannot be applied if the local var needs to be normalized on load.
else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
const bool possiblyStore = !definitelyLoad;
if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
{
typ = temp->TypeGet();
tree->gtType = typ;
foldAndReturnTemp = true;
if (possiblyStore)
{
// This node can be on the left-hand-side of an assignment node.
// Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
// is called on its parent in post-order morph.
temp->gtFlags |= GTF_VAR_FOLDED_IND;
}
}
}
// For matching types we can fold
else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
tree->gtType = typ = temp->TypeGet();
foldAndReturnTemp = true;
}
else
{
// Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
// nullptr)
assert(fieldSeq == nullptr);
bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
assert(b || fieldSeq == nullptr);
if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
{
// Append the field sequence, change the type.
temp->AsLclFld()->SetFieldSeq(
GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
temp->gtType = typ;
foldAndReturnTemp = true;
}
}
                        // Otherwise we will fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
else // !temp->OperIsLocal()
{
// We don't try to fold away the GT_IND/GT_ADDR for this case
temp = nullptr;
}
}
else if (op1->OperGet() == GT_ADD)
{
#ifdef TARGET_ARM
                    // Check for a misaligned floating point indirection.
if (varTypeIsFloating(typ))
{
GenTree* addOp2 = op1->AsOp()->gtGetOp2();
if (addOp2->IsCnsIntOrI())
{
ssize_t offset = addOp2->AsIntCon()->gtIconVal;
if ((offset % emitTypeSize(TYP_FLOAT)) != 0)
{
tree->gtFlags |= GTF_IND_UNALIGNED;
}
}
}
#endif // TARGET_ARM
/* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
opts.OptimizationEnabled())
{
// No overflow arithmetic with pointers
noway_assert(!op1->gtOverflow());
temp = op1->AsOp()->gtOp1->AsOp()->gtOp1;
if (!temp->OperIsLocal())
{
temp = nullptr;
break;
}
// Can not remove the GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1))
{
break;
}
ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal;
fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
// Does the address have an associated zero-offset field sequence?
FieldSeqNode* addrFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq))
{
fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
}
if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
{
noway_assert(!varTypeIsGC(temp->TypeGet()));
foldAndReturnTemp = true;
}
else
{
// The emitter can't handle large offsets
if (ival1 != (unsigned short)ival1)
{
break;
}
// The emitter can get confused by invalid offsets
if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum()))
{
break;
}
}
// Now we can fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
}
}
// At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging:
// - We may have a load of a local where the load has a different type than the local
// - We may have a load of a local plus an offset
//
// In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and
// offset if doing so is legal. The only cases in which this transformation is illegal are if the load
// begins before the local or if the load extends beyond the end of the local (i.e. if the load is
// out-of-bounds w.r.t. the local).
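            // e.g. "IND<int>(ADDR(struct local) + 8)" can become "LCL_FLD<int> [+8]" on that local,
            // provided the int access stays within the local's bounds.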
if ((temp != nullptr) && !foldAndReturnTemp)
{
assert(temp->OperIsLocal());
const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(lclNum);
const var_types tempTyp = temp->TypeGet();
const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK);
const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp);
// Make sure we do not enregister this lclVar.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
// If the size of the load is greater than the size of the lclVar, we cannot fold this access into
// a lclFld: the access represented by an lclFld node must begin at or after the start of the
// lclVar and must not extend beyond the end of the lclVar.
if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize))
{
GenTreeLclFld* lclFld;
                    // We will turn a GT_LCL_VAR into a GT_LCL_FLD with a gtLclOffs of 'ival',
                    // or if we already have a GT_LCL_FLD we will adjust the gtLclOffs by adding 'ival'.
                    // Then we change the type of the GT_LCL_FLD to match the original GT_IND type.
//
if (temp->OperGet() == GT_LCL_FLD)
{
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1));
lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq));
}
else // We have a GT_LCL_VAR.
{
assert(temp->OperGet() == GT_LCL_VAR);
temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField".
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(static_cast<unsigned>(ival1));
if (fieldSeq != nullptr)
{
// If it does represent a field, note that.
lclFld->SetFieldSeq(fieldSeq);
}
}
temp->gtType = tree->gtType;
foldAndReturnTemp = true;
}
}
if (foldAndReturnTemp)
{
assert(temp != nullptr);
assert(temp->TypeGet() == typ);
assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR));
// Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for
// 'temp' because a GT_ADDR always marks it for its operand.
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
if (op1->OperGet() == GT_ADD)
{
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT
}
DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
DEBUG_DESTROY_NODE(tree); // GT_IND
// If the result of the fold is a local var, we may need to perform further adjustments e.g. for
// normalization.
if (temp->OperIs(GT_LCL_VAR))
{
#ifdef DEBUG
// We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear
// and the node in question must have this bit set (as it has already been morphed).
temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
const bool forceRemorph = true;
temp = fgMorphLocalVar(temp, forceRemorph);
#ifdef DEBUG
                // We then set this flag on `temp` because `fgMorphLocalVar` may not set it itself, and the
// caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function
// returns.
temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return temp;
}
// Only do this optimization when we are in the global optimizer. Doing this after value numbering
// could result in an invalid value number for the newly generated GT_IND node.
if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
{
// Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
// TBD: this transformation is currently necessary for correctness -- it might
// be good to analyze the failures that result if we don't do this, and fix them
// in other ways. Ideally, this should be optional.
GenTree* commaNode = op1;
GenTreeFlags treeFlags = tree->gtFlags;
commaNode->gtType = typ;
commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
// dangerous, clear the GTF_REVERSE_OPS at
// least.
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA)
{
commaNode = commaNode->AsOp()->gtOp2;
commaNode->gtType = typ;
commaNode->gtFlags =
(treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is
// dangerous, clear the GTF_REVERSE_OPS, GT_ASG, and GT_CALL at
// least.
commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) &
(GTF_ASG | GTF_CALL));
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
tree = op1;
GenTree* addr = commaNode->AsOp()->gtOp2;
// TODO-1stClassStructs: we often create a struct IND without a handle, fix it.
op1 = gtNewIndir(typ, addr);
// This is very conservative
op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING;
op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
#ifdef DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
commaNode->AsOp()->gtOp2 = op1;
commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
return tree;
}
break;
}
case GT_ADDR:
// Can not remove op1 if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
if (op1->OperGet() == GT_IND)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(IND(...)) == (...).
GenTree* addr = op1->AsOp()->gtOp1;
// If tree has a zero field sequence annotation, update the annotation
// on addr node.
FieldSeqNode* zeroFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq))
{
fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq);
}
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
else if (op1->OperGet() == GT_OBJ)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(OBJ(...)) == (...).
GenTree* addr = op1->AsObj()->Addr();
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
{
// Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
// (Be sure to mark "z" as an l-value...)
ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack));
for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2())
{
commas.Push(comma);
}
GenTree* commaNode = commas.Top();
// The top-level addr might be annotated with a zeroOffset field.
FieldSeqNode* zeroFieldSeq = nullptr;
bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
tree = op1;
commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE;
// If the node we're about to put under a GT_ADDR is an indirection, it
// doesn't need to be materialized, since we only want the addressing mode. Because
// of this, this GT_IND is not a faulting indirection and we don't have to extract it
// as a side effect.
GenTree* commaOp2 = commaNode->AsOp()->gtOp2;
if (commaOp2->OperIsBlk())
{
commaOp2->SetOper(GT_IND);
}
if (commaOp2->gtOper == GT_IND)
{
commaOp2->gtFlags |= GTF_IND_NONFAULTING;
commaOp2->gtFlags &= ~GTF_EXCEPT;
commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT);
}
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
if (isZeroOffset)
{
// Transfer the annotation to the new GT_ADDR node.
fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq);
}
commaNode->AsOp()->gtOp2 = op1;
// Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
// might give op1 a type different from byref (like, say, native int). So now go back and give
// all the comma nodes the type of op1.
while (!commas.Empty())
{
GenTree* comma = commas.Pop();
comma->gtType = op1->gtType;
#ifdef DEBUG
comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
gtUpdateNodeSideEffects(comma);
}
return tree;
}
break;
case GT_COLON:
if (fgGlobalMorph)
{
/* Mark the nodes that are conditionally executed */
fgWalkTreePre(&tree, gtMarkColonCond);
}
/* Since we're doing this postorder we clear this if it got set by a child */
fgRemoveRestOfBlock = false;
break;
case GT_COMMA:
/* Special case: trees that don't produce a value */
if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2))
{
typ = tree->gtType = TYP_VOID;
}
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
//
if (!optValnumCSE_phase)
{
// Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
// is all we need.
GenTree* op1SideEffects = nullptr;
// The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
// hoisted expressions in loops.
gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
if (op1SideEffects)
{
// Replace the left hand side with the side effect list.
op1 = op1SideEffects;
tree->AsOp()->gtOp1 = op1SideEffects;
gtUpdateNodeSideEffects(tree);
}
else
{
op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op2;
}
// If the right operand is just a void nop node, throw it away. Unless this is a
// comma throw, in which case we want the top-level morphing loop to recognize it.
if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree))
{
op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
case GT_JTRUE:
/* Special case if fgRemoveRestOfBlock is set to true */
if (fgRemoveRestOfBlock)
{
if (fgIsCommaThrow(op1, true))
{
GenTree* throwNode = op1->AsOp()->gtOp1;
JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n",
dspTreeID(tree));
DEBUG_DESTROY_NODE(tree);
return throwNode;
}
noway_assert(op1->OperIsCompare());
noway_assert(op1->gtFlags & GTF_EXCEPT);
// We need to keep op1 for the side-effects. Hang it off
// a GT_COMMA node
JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree));
tree->ChangeOper(GT_COMMA);
tree->AsOp()->gtOp2 = op2 = gtNewNothingNode();
// Additionally since we're eliminating the JTRUE
// codegen won't like it if op1 is a RELOP of longs, floats or doubles.
// So we change it into a GT_COMMA as well.
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1));
op1->ChangeOper(GT_COMMA);
op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop
op1->gtType = op1->AsOp()->gtOp1->gtType;
return tree;
}
break;
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName ==
NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant)
{
// Should be expanded by the time it reaches CSE phase
assert(!optValnumCSE_phase);
JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to ");
if (op1->OperIsConst())
{
// We're lucky to catch a constant here while the importer was not
JITDUMP("true\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(1);
}
else
{
GenTree* op1SideEffects = nullptr;
gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT);
if (op1SideEffects != nullptr)
{
DEBUG_DESTROY_NODE(tree);
// Keep side-effects of op1
tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0));
JITDUMP("false with side effects:\n")
DISPTREE(tree);
}
else
{
JITDUMP("false\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(0);
}
}
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
break;
default:
break;
}
assert(oper == tree->gtOper);
// Propagate comma throws.
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON))
{
if ((op1 != nullptr) && fgIsCommaThrow(op1, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
if ((op2 != nullptr) && fgIsCommaThrow(op2, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
}
/*-------------------------------------------------------------------------
* Optional morphing is done if tree transformations are permitted
*/
if ((opts.compFlags & CLFLG_TREETRANS) == 0)
{
return tree;
}
tree = fgMorphSmpOpOptional(tree->AsOp());
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeCast: Optimizes the supplied GT_CAST tree.
//
// Tries to get rid of the cast, its operand, or the GTF_OVERFLOW flag; calls
// "optNarrowTree". Called in post-order by "fgMorphSmpOp".
//
// Arguments:
// cast - the cast tree to optimize
//
// Return Value:
// The optimized tree (that can have any shape).
//
GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast)
{
GenTree* src = cast->CastOp();
if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src))
{
return cast;
}
// See if we can discard the cast.
if (varTypeIsIntegral(cast) && varTypeIsIntegral(src))
{
IntegralRange srcRange = IntegralRange::ForNode(src, this);
IntegralRange noOvfRange = IntegralRange::ForCastInput(cast);
if (noOvfRange.Contains(srcRange))
{
// Casting between same-sized types is a no-op,
// given we have proven this cast cannot overflow.
if (genActualType(cast) == genActualType(src))
{
return src;
}
cast->ClearOverflow();
cast->SetAllEffectsFlags(src);
// Try and see if we can make this cast into a cheaper zero-extending version.
if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive())
{
cast->SetUnsigned();
}
}
// For checked casts, we're done.
if (cast->gtOverflow())
{
return cast;
}
var_types castToType = cast->CastToType();
// For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast.
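// For example, "CAST(ubyte <- IND(byte))" can be handled by simply retyping the IND to ubyte,
// since both types are one byte wide.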
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) &&
src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD))
{
// We're changing the type here so we need to update the VN;
// in other cases we discard the cast without modifying src
// so the VN doesn't change.
src->ChangeType(castToType);
src->SetVNsFromNode(cast);
return src;
}
// Try to narrow the operand of the cast and discard the cast.
if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) &&
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false))
{
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true);
// "optNarrowTree" may leave a dead cast behind.
if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp())))
{
src = src->AsCast()->CastOp();
}
return src;
}
// Check for two consecutive casts, we may be able to discard the intermediate one.
if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow())
{
var_types dstCastToType = castToType;
var_types srcCastToType = src->AsCast()->CastToType();
// CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X).
// CAST(ushort <- CAST(short <- X)): CAST(ushort <- X).
if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType)))
{
cast->CastOp() = src->AsCast()->CastOp();
DEBUG_DESTROY_NODE(src);
}
}
}
return cast;
}
//------------------------------------------------------------------------
// fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns.
//
// Arguments:
// cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant
//
// Return Value:
// The optimized tree, "cmp" in case no optimizations were done.
// Currently only returns relop trees.
//
GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_EQ, GT_NE));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
// Check for "(expr +/- icon1) ==/!= (non-zero-icon2)".
if (op2->IsCnsIntOrI() && (op2->IconValue() != 0))
{
// Since this can occur repeatedly we use a while loop.
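// For example, "((x - 4) + 2) == 8" folds in two iterations to "x == 10".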
while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) &&
!op1->gtOverflow())
{
// Got it; change "x + icon1 == icon2" to "x == icon2 - icon1".
ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue();
ssize_t op2Value = op2->IconValue();
if (op1->OperIs(GT_ADD))
{
op2Value -= op1Value;
}
else
{
op2Value += op1Value;
}
op1 = op1->AsOp()->gtGetOp1();
op2->SetIconValue(static_cast<int32_t>(op2Value));
}
cmp->gtOp1 = op1;
fgUpdateConstTreeValueNumber(op2);
}
// Here we look for the following tree
//
// EQ/NE
// / \.
// op1 CNS 0/1
//
if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1))
{
ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue());
if (op1->OperIsCompare())
{
// Here we look for the following tree
//
// EQ/NE -> RELOP/!RELOP
// / \ / \.
// RELOP CNS 0/1
// / \.
//
// Note that we will remove/destroy the EQ/NE node and move
// the RELOP up into its location.
// Here we reverse the RELOP if necessary.
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ)));
if (reverse)
{
gtReverseCond(op1);
}
noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
op1->SetVNsFromNode(cmp);
DEBUG_DESTROY_NODE(cmp);
return op1;
}
//
// Now we check for a compare with the result of an '&' operator
//
// Here we look for the following transformation:
//
// EQ/NE EQ/NE
// / \ / \.
// AND CNS 0/1 -> AND CNS 0
// / \ / \.
// RSZ/RSH CNS 1 x CNS (1 << y)
// / \.
// x CNS_INT +y
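// For example, "((x >> 5) & 1) != 0" becomes "(x & 0x20) != 0", turning the shift-and-mask
// into a single bit-test mask.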
if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH))
{
GenTreeOp* andOp = op1->AsOp();
GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp();
if (!rshiftOp->gtGetOp2()->IsCnsIntOrI())
{
goto SKIP;
}
ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue();
if (shiftAmount < 0)
{
goto SKIP;
}
if (!andOp->gtGetOp2()->IsIntegralConst(1))
{
goto SKIP;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if (andOp->TypeIs(TYP_INT))
{
if (shiftAmount > 31)
{
goto SKIP;
}
andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount));
// Reverse the condition if necessary.
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetIconValue(0);
}
}
else if (andOp->TypeIs(TYP_LONG))
{
if (shiftAmount > 63)
{
goto SKIP;
}
andMask->SetLngValue(1ll << shiftAmount);
// Reverse the cond if necessary
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetLngValue(0);
}
}
andOp->gtOp1 = rshiftOp->gtGetOp1();
DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2());
DEBUG_DESTROY_NODE(rshiftOp);
}
}
SKIP:
// Now check for compares with small constant longs that can be cast to int.
// Note that we filter out negative values here so that the transformations
// below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were
// we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs.
if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0))
{
return cmp;
}
if (!op1->OperIs(GT_AND))
{
// Another interesting case: cast from int.
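// For example, "CAST(long <- x) == 3L" with a non-overflowing cast becomes "x == 3".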
if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow())
{
// Simply make this into an integer comparison.
cmp->gtOp1 = op1->AsCast()->CastOp();
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
// Now we perform the following optimization:
// EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) =>
// EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT)
// when the constants are sufficiently small.
// This transform cannot preserve VNs.
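// For example, "(x & 0xFFL) == 0x12L" with a long "x" becomes "((int)x & 0xFF) == 0x12".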
if (fgGlobalMorph)
{
assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND));
// Is the result of the mask effectively an INT?
GenTreeOp* andOp = op1->AsOp();
if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG))
{
return cmp;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if ((andMask->LngValue() >> 32) != 0)
{
return cmp;
}
// Now we narrow the first operand of AND to int.
if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false))
{
optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true);
}
else
{
andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT);
}
assert(andMask == andOp->gtGetOp2());
// Now replace the mask node.
andMask->BashToConst(static_cast<int32_t>(andMask->LngValue()));
// Now change the type of the AND node.
andOp->ChangeType(TYP_INT);
// Finally we replace the comparand.
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation.
//
// Recognizes comparisons against various constant operands and morphs
// them, if possible, into comparisons against zero.
//
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// The "cmp" tree, possibly with a modified oper.
// The second operand's constant value may be modified as well.
//
// Assumptions:
// The operands have been swapped so that any constants are on the right.
// The second operand is an integral constant.
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2()));
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
assert(genActualType(op1) == genActualType(op2));
genTreeOps oper = cmp->OperGet();
int64_t op2Value = op2->IntegralValue();
if (op2Value == 1)
{
// Check for "expr >= 1".
if (oper == GT_GE)
{
// Change to "expr != 0" for unsigned and "expr > 0" for signed.
oper = cmp->IsUnsigned() ? GT_NE : GT_GT;
}
// Check for "expr < 1".
else if (oper == GT_LT)
{
// Change to "expr == 0" for unsigned and "expr <= 0".
oper = cmp->IsUnsigned() ? GT_EQ : GT_LE;
}
}
// Check for "expr relop -1".
else if (!cmp->IsUnsigned() && (op2Value == -1))
{
// Check for "expr <= -1".
if (oper == GT_LE)
{
// Change to "expr < 0".
oper = GT_LT;
}
// Check for "expr > -1".
else if (oper == GT_GT)
{
// Change to "expr >= 0".
oper = GT_GE;
}
}
else if (cmp->IsUnsigned())
{
if ((oper == GT_LE) || (oper == GT_GT))
{
if (op2Value == 0)
{
// IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
// recognizes certain patterns that involve GT_NE (e.g (x & 4) != 0) and fails
// if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
// and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare; it sometimes
// occurs as a result of branch inversion.
oper = (oper == GT_LE) ? GT_EQ : GT_NE;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
// LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0).
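// For example, "x <= 0x7FFFFFFF" as an unsigned 32-bit compare holds exactly when the sign
// bit of "x" is clear, i.e. when "x >= 0" as a signed compare.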
else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) ||
((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX)))
{
oper = (oper == GT_LE) ? GT_GE : GT_LT;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
}
}
if (!cmp->OperIs(oper))
{
// Keep the old ValueNumber for 'tree' as the new expr
// will still compute the same value as before.
cmp->SetOper(oper, GenTree::PRESERVE_VN);
op2->SetIntegralValue(0);
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// fgOptimizeHWIntrinsic: optimize a HW intrinsic node
//
// Arguments:
// node - HWIntrinsic node to examine
//
// Returns:
// The original node if no optimization happened or if tree bashing occurred.
// An alternative tree if an optimization happened.
//
// Notes:
// Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create,
// and if the node is one of these, attempts to optimize.
// This is post-order, meaning that it will not morph the children.
//
GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node)
{
assert(!optValnumCSE_phase);
if (opts.OptimizationDisabled())
{
return node;
}
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
#endif
{
bool hwAllArgsAreConstZero = true;
for (GenTree* arg : node->Operands())
{
if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero())
{
hwAllArgsAreConstZero = false;
break;
}
}
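// If every argument is a constant zero, the Create call produces the zero vector, so it can
// be replaced with the cheaper get_Zero intrinsic
// (e.g. Vector128.Create(0, 0, 0, 0) => Vector128<int>.Zero).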
if (hwAllArgsAreConstZero)
{
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
{
node->ResetHWIntrinsicId(NI_Vector128_get_Zero);
break;
}
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
{
node->ResetHWIntrinsicId(NI_Vector256_get_Zero);
break;
}
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
{
node->ResetHWIntrinsicId(NI_Vector64_get_Zero);
break;
}
#endif
default:
unreached();
}
}
break;
}
default:
break;
}
return node;
}
#endif
//------------------------------------------------------------------------
// fgOptimizeCommutativeArithmetic: Optimizes commutative operations.
//
// Arguments:
// tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize.
//
// Return Value:
// The optimized tree that can have any shape.
//
GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree)
{
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND));
assert(!tree->gtOverflowEx());
// Commute constants to the right.
if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF))
{
// TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))".
// This may indicate a missed "remorph". Task is to re-enable this assertion and investigate.
std::swap(tree->gtOp1, tree->gtOp2);
}
if (fgOperIsBitwiseRotationRoot(tree->OperGet()))
{
GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree);
if (rotationTree != nullptr)
{
return rotationTree;
}
}
if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR))
{
GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp());
if (castTree != nullptr)
{
return castTree;
}
}
if (varTypeIsIntegralOrI(tree))
{
genTreeOps oldTreeOper = tree->OperGet();
GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp());
if (optimizedTree != nullptr)
{
if (!optimizedTree->OperIs(oldTreeOper))
{
// "optimizedTree" could end up being a COMMA.
return optimizedTree;
}
tree = optimizedTree;
}
}
if (!optValnumCSE_phase)
{
GenTree* optimizedTree = nullptr;
if (tree->OperIs(GT_ADD))
{
optimizedTree = fgOptimizeAddition(tree);
}
else if (tree->OperIs(GT_MUL))
{
optimizedTree = fgOptimizeMultiply(tree);
}
else if (tree->OperIs(GT_AND))
{
optimizedTree = fgOptimizeBitwiseAnd(tree);
}
else if (tree->OperIs(GT_XOR))
{
optimizedTree = fgOptimizeBitwiseXor(tree);
}
if (optimizedTree != nullptr)
{
return optimizedTree;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeAddition: optimizes addition.
//
// Arguments:
// add - the unchecked GT_ADD tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
{
assert(add->OperIs(GT_ADD) && !add->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = add->gtGetOp1();
GenTree* op2 = add->gtGetOp2();
// Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))".
// Be careful not to create a byref pointer that may point outside of the ref object.
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2".
if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() &&
op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() &&
!varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph)
{
GenTreeOp* addOne = op1->AsOp();
GenTreeOp* addTwo = op2->AsOp();
GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon();
GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon();
addOne->gtOp2 = addTwo->gtGetOp1();
addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2());
DEBUG_DESTROY_NODE(addTwo);
constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue());
op2 = constOne;
add->gtOp2 = constOne;
DEBUG_DESTROY_NODE(constTwo);
}
// Fold (x + 0) - given it won't change the tree type to TYP_REF.
// TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)".
if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF)))
{
if (op2->IsCnsIntOrI() && varTypeIsI(op1))
{
fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq);
}
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(add);
return op1;
}
// Note that these transformations are legal for floating-point ADDs as well.
if (opts.OptimizationEnabled())
{
// - a + b = > b - a
// ADD((NEG(a), b) => SUB(b, a)
// Do not do this if "op2" is constant for canonicalization purposes.
if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2))
{
add->SetOper(GT_SUB);
add->gtOp1 = op2;
add->gtOp2 = op1->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op1);
return add;
}
// a + -b = > a - b
// ADD(a, (NEG(b)) => SUB(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
add->SetOper(GT_SUB);
add->gtOp2 = op2->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op2);
return add;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeMultiply: optimizes multiplication.
//
// Arguments:
// mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul));
assert(!mul->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
assert(mul->TypeGet() == genActualType(op1));
assert(mul->TypeGet() == genActualType(op2));
if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl())
{
double multiplierValue = op2->AsDblCon()->gtDconVal;
if (multiplierValue == 1.0)
{
// Fold "x * 1.0" to "x".
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Fold "x * 2.0" to "x + x".
// If op1 is not a local we will have to introduce a temporary via GT_COMMA.
// Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do
// this for locals / after hoisting has run (when rationalization remorphs
// math intrinsics into calls...).
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear)))
{
op2 = fgMakeMultiUse(&op1);
GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2);
INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return add;
}
}
if (op2->IsIntegralConst())
{
ssize_t mult = op2->AsIntConCommon()->IconValue();
if (mult == 0)
{
// We may be able to throw away op1 (unless it has side-effects)
if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
{
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(mul);
return op2; // Just return the "0" node
}
// We need to keep op1 for the side-effects. Hang it off a GT_COMMA node.
mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN);
return mul;
}
#ifdef TARGET_XARCH
// Should we try to replace integer multiplication with lea/add/shift sequences?
bool mulShiftOpt = compCodeOpt() != SMALL_CODE;
#else // !TARGET_XARCH
bool mulShiftOpt = false;
#endif // !TARGET_XARCH
size_t abs_mult = (mult >= 0) ? mult : -mult;
size_t lowestBit = genFindLowestBit(abs_mult);
bool changeToShift = false;
// is it a power of two? (positive or negative)
if (abs_mult == lowestBit)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
if (abs_mult == 1)
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Change the multiplication into a shift by log2(val) bits.
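// For example, "x * 8" becomes "x << 3", and "x * -8" becomes "(-x) << 3" via the GT_NEG
// inserted above.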
op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult));
changeToShift = true;
}
else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit))
{
int shift = genLog2(lowestBit);
ssize_t factor = abs_mult >> shift;
if (factor == 3 || factor == 5 || factor == 9)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
// change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
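// For example, "x * 12" becomes "(x * 3) << 2" and "x * 40" becomes "(x * 5) << 3", where the
// small multiply can be encoded as a scaled address mode (lea).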
op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, gtNewIconNode(factor, mul->TypeGet()));
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
op2->AsIntConCommon()->SetIconValue(shift);
changeToShift = true;
}
}
if (changeToShift)
{
fgUpdateConstTreeValueNumber(op2);
mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN);
return mul;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseAnd: optimizes the "and" operation.
//
// Arguments:
// andOp - the GT_AND tree to optimize.
//
// Return Value:
// The optimized tree, currently always a relop, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp)
{
assert(andOp->OperIs(GT_AND));
assert(!optValnumCSE_phase);
GenTree* op1 = andOp->gtGetOp1();
GenTree* op2 = andOp->gtGetOp2();
// Fold "cmp & 1" to just "cmp".
if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(andOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against
// various cast operands and tries to remove them. E.g.:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CNS_INT long
//
// to:
//
// * GE_un int
// +--* X int
// \--* CNS_INT int
//
// same for:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CAST long <- [u]long <- int
// \--* ARR_LEN int
//
// These patterns quite often show up along with index checks
//
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// Returns the same tree where operands might have narrower types
//
// Notes:
// TODO-Casts: consider unifying this function with "optNarrowTree"
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTree* op2 = cmp->gtGetOp2();
// Caller is expected to call this function only if we have CAST nodes
assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST));
if (!op1->TypeIs(TYP_LONG))
{
// We can extend this logic to handle small types as well, but currently it's done mostly to
// assist range check elimination
return cmp;
}
GenTree* castOp;
GenTree* knownPositiveOp;
bool knownPositiveIsOp2;
if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))))
{
// op2 is either a LONG constant or (T)ARR_LENGTH
knownPositiveIsOp2 = true;
castOp = cmp->gtGetOp1();
knownPositiveOp = cmp->gtGetOp2();
}
else
{
// op1 is either a LONG constant (yes, it's pretty normal for relops)
// or (T)ARR_LENGTH
castOp = cmp->gtGetOp2();
knownPositiveOp = cmp->gtGetOp1();
knownPositiveIsOp2 = false;
}
if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) &&
castOp->IsUnsigned() && !castOp->gtOverflow())
{
bool knownPositiveFitsIntoU32 = false;
if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue()))
{
// BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX.
knownPositiveFitsIntoU32 = true;
}
else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) &&
knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))
{
knownPositiveFitsIntoU32 = true;
// TODO-Casts: recognize Span.Length here as well.
}
if (!knownPositiveFitsIntoU32)
{
return cmp;
}
JITDUMP("Removing redundant cast(s) for:\n")
DISPTREE(cmp)
JITDUMP("\n\nto:\n\n")
cmp->SetUnsigned();
// Drop cast from castOp
if (knownPositiveIsOp2)
{
cmp->gtOp1 = castOp->AsCast()->CastOp();
}
else
{
cmp->gtOp2 = castOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(castOp);
if (knownPositiveOp->OperIs(GT_CAST))
{
// Drop cast from knownPositiveOp too
if (knownPositiveIsOp2)
{
cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp();
}
else
{
cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(knownPositiveOp);
}
else
{
// Change type for constant from LONG to INT
knownPositiveOp->ChangeType(TYP_INT);
#ifndef TARGET_64BIT
assert(knownPositiveOp->OperIs(GT_CNS_LNG));
knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue()));
#endif
fgUpdateConstTreeValueNumber(knownPositiveOp);
}
DISPTREE(cmp)
JITDUMP("\n")
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseXor: optimizes the "xor" operation.
//
// Arguments:
// xorOp - the GT_XOR tree to optimize.
//
// Return Value:
// The optimized tree, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeBitwiseXor(GenTreeOp* xorOp)
{
assert(xorOp->OperIs(GT_XOR));
assert(!optValnumCSE_phase);
GenTree* op1 = xorOp->gtGetOp1();
GenTree* op2 = xorOp->gtGetOp2();
if (op2->IsIntegralConst(0))
{
/* "x ^ 0" is "x" */
DEBUG_DESTROY_NODE(xorOp, op2);
return op1;
}
else if (op2->IsIntegralConst(-1))
{
/* "x ^ -1" is "~x" */
xorOp->ChangeOper(GT_NOT);
xorOp->gtOp2 = nullptr;
DEBUG_DESTROY_NODE(op2);
return xorOp;
}
else if (op2->IsIntegralConst(1) && op1->OperIsCompare())
{
/* "binaryVal ^ 1" is "!binaryVal" */
gtReverseCond(op1);
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(xorOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgPropagateCommaThrow: propagate a "comma throw" up the tree.
//
// "Comma throws" in the compiler represent the canonical form of an always
// throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy
// the semantic that the original expression produced some value and are
// generated by "gtFoldExprConst" when it encounters checked arithmetic that
// will determinably overflow.
//
// In the global morphing phase, "comma throws" are "propagated" up the tree,
// in post-order, to eliminate nodes that will never execute. This method,
// called by "fgMorphSmpOp", encapsulates this optimization.
//
// Arguments:
// parent - the node currently being processed.
// commaThrow - the comma throw in question, "parent"'s operand.
// precedingSideEffects - side effects of nodes preceding "comma" in execution order.
//
// Return Value:
// If "parent" is to be replaced with a comma throw, i. e. the propagation was successful,
// the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception:
// the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not
// have to be a "comma throw", it can be "bare" throw call if the "parent" node did not
// produce any value.
//
// Notes:
// "Comma throws" are very rare.
//
GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects)
{
// Comma throw propagation does not preserve VNs, and deletes nodes.
assert(fgGlobalMorph);
assert(fgIsCommaThrow(commaThrow));
if ((commaThrow->gtFlags & GTF_COLON_COND) == 0)
{
fgRemoveRestOfBlock = true;
}
if ((precedingSideEffects & GTF_ALL_EFFECT) == 0)
{
if (parent->TypeIs(TYP_VOID))
{
// Return the throw node as the new tree.
return commaThrow->gtGetOp1();
}
// Fix up the COMMA's type if needed.
if (genActualType(parent) != genActualType(commaThrow))
{
commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent));
commaThrow->ChangeType(genActualType(parent));
}
return commaThrow;
}
return nullptr;
}
//----------------------------------------------------------------------------------------------
// fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree.
//
// Arguments:
// ret - The return node that uses an indirection.
//
// Return Value:
// The original op1 of the ret if there was no optimization, or a new optimized op1 otherwise.
//
GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret)
{
assert(ret->OperIs(GT_RETURN));
assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ));
GenTreeIndir* ind = ret->gtGetOp1()->AsIndir();
GenTree* addr = ind->Addr();
if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR))
{
// If struct promotion was undone, adjust the annotations
if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr))
{
return ind;
}
// If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that
// LclVar.
// Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))).
GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar();
if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum()))
{
assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind));
unsigned indSize;
if (ind->OperIs(GT_IND))
{
indSize = genTypeSize(ind);
}
else
{
indSize = ind->AsBlk()->GetLayout()->GetSize();
}
LclVarDsc* varDsc = lvaGetDesc(lclVar);
unsigned lclVarSize;
if (!lclVar->TypeIs(TYP_STRUCT))
{
lclVarSize = genTypeSize(varDsc->TypeGet());
}
else
{
lclVarSize = varDsc->lvExactSize;
}
// TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST
// int<-SIMD16` etc.
assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister);
#if defined(TARGET_64BIT)
bool canFold = (indSize == lclVarSize);
#else // !TARGET_64BIT
// TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST
// long<->double` there.
bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES);
#endif
// TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for
// gtNewTempAssign`.
if (canFold && (genReturnBB == nullptr))
{
// Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it.
// Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken
// and enregister it.
DEBUG_DESTROY_NODE(ind);
DEBUG_DESTROY_NODE(addr);
ret->gtOp1 = lclVar;
// We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can
// get rid of it now since the GT_RETURN node should never have
// its address taken.
assert((ret->gtFlags & GTF_DONT_CSE) == 0);
lclVar->gtFlags &= ~GTF_DONT_CSE;
return lclVar;
}
else if (!varDsc->lvDoNotEnregister)
{
lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet));
}
}
}
return ind;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
{
genTreeOps oper = tree->gtOper;
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types typ = tree->TypeGet();
if (fgGlobalMorph && GenTree::OperIsCommutative(oper))
{
/* Swap the operands so that the more expensive one is 'op1' */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtOp1 = op2;
tree->gtOp2 = op1;
op2 = op1;
op1 = tree->gtOp1;
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
if (oper == op2->gtOper)
{
/* Reorder nested operators at the same precedence level to be
left-recursive. For example, change "(a+(b+c))" to the
equivalent expression "((a+b)+c)".
*/
/* Things are handled differently for floating-point operators */
if (!varTypeIsFloating(tree->TypeGet()))
{
fgMoveOpsLeft(tree);
op1 = tree->gtOp1;
op2 = tree->gtOp2;
}
}
}
#if REARRANGE_ADDS
/* Change "((x+icon)+y)" to "((x+y)+icon)"
Don't reorder floating-point operations */
if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() &&
varTypeIsIntegralOrI(typ))
{
GenTree* ad1 = op1->AsOp()->gtOp1;
GenTree* ad2 = op1->AsOp()->gtOp2;
if (!op2->OperIsConst() && ad2->OperIsConst())
{
// This takes
// + (tree)
// / \.
// / \.
// / \.
// + (op1) op2
// / \.
// / \.
// ad1 ad2
//
// and it swaps ad2 and op2.
// Don't create a byref pointer that may point outside of the ref object.
// If a GC happens, the byref won't get updated. This can happen if one
// of the int components is negative. It also requires the address generation
// be in a fully-interruptible code region.
if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet()))
{
tree->gtOp2 = ad2;
op1->AsOp()->gtOp2 = op2;
op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
op2 = tree->gtOp2;
}
}
}
#endif
/*-------------------------------------------------------------------------
* Perform optional oper-specific postorder morphing
*/
switch (oper)
{
case GT_ASG:
// Make sure we're allowed to do this.
if (optValnumCSE_phase)
{
// It is not safe to reorder/delete CSE's
break;
}
if (varTypeIsStruct(typ) && !tree->IsPhiDefn())
{
if (tree->OperIsCopyBlkOp())
{
return fgMorphCopyBlock(tree);
}
else
{
return fgMorphInitBlock(tree);
}
}
if (typ == TYP_LONG)
{
break;
}
if (op2->gtFlags & GTF_ASG)
{
break;
}
if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT))
{
break;
}
/* Special case: a cast that can be thrown away */
// TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only
// one cast and sometimes there is another one after it that gets removed by this
// code. fgMorphSmp should be improved to remove all redundant casts so this code
// can be removed.
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow())
{
var_types srct;
var_types cast;
var_types dstt;
srct = op2->AsCast()->CastOp()->TypeGet();
cast = (var_types)op2->CastToType();
dstt = op1->TypeGet();
/* Make sure these are all ints and precision is not lost */
if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT)
{
op2 = tree->gtOp2 = op2->AsCast()->CastOp();
}
}
break;
case GT_MUL:
/* Check for the case "(val + icon) * icon" */
if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD)
{
GenTree* add = op1->AsOp()->gtOp2;
if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0))
{
if (tree->gtOverflow() || op1->gtOverflow())
{
break;
}
ssize_t imul = op2->AsIntCon()->gtIconVal;
ssize_t iadd = add->AsIntCon()->gtIconVal;
/* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */
oper = GT_ADD;
tree->ChangeOper(oper);
op2->AsIntCon()->SetValueTruncating(iadd * imul);
op1->ChangeOper(GT_MUL);
add->AsIntCon()->SetIconValue(imul);
}
}
break;
case GT_DIV:
/* For "val / 1", just return "val" */
if (op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(tree);
return op1;
}
break;
case GT_UDIV:
case GT_UMOD:
tree->CheckDivideByConstOptimized(this);
break;
case GT_LSH:
/* Check for the case "(val + icon) << icon" */
if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow())
{
GenTree* cns = op1->AsOp()->gtOp2;
if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0))
{
ssize_t ishf = op2->AsIntConCommon()->IconValue();
ssize_t iadd = cns->AsIntConCommon()->IconValue();
// printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n");
/* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */
tree->ChangeOper(GT_ADD);
// we are reusing the shift amount node here, but the type we want is that of the shift result
op2->gtType = op1->gtType;
op2->AsIntConCommon()->SetValueTruncating(iadd << ishf);
op1->ChangeOper(GT_LSH);
cns->AsIntConCommon()->SetIconValue(ishf);
}
}
break;
case GT_INIT_VAL:
// Initialization values for initBlk have special semantics - their lower
// byte is used to fill the struct. However, we allow 0 as a "bare" value,
// which enables them to get a VNForZero, and be propagated.
if (op1->IsIntegralConst(0))
{
return op1;
}
break;
default:
break;
}
return tree;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree.
//
// Arguments:
// multiOp - The tree to morph
//
// Return Value:
// The fully morphed tree.
//
GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp)
{
gtUpdateNodeOperSideEffects(multiOp);
bool dontCseConstArguments = false;
#if defined(FEATURE_HW_INTRINSICS)
// Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments
if (multiOp->OperIs(GT_HWINTRINSIC))
{
NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM)
{
dontCseConstArguments = true;
}
#elif defined(TARGET_ARMARCH)
if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic))
{
dontCseConstArguments = true;
}
#endif
}
#endif
for (GenTree** use : multiOp->UseEdges())
{
*use = fgMorphTree(*use);
GenTree* operand = *use;
multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
if (dontCseConstArguments && operand->OperIsConst())
{
operand->SetDoNotCSE();
}
// Promoted structs after morph must be in one of two states:
// a) Fully eliminated from the IR (independent promotion) OR only be
// used by "special" nodes (e. g. LHS of ASGs for multi-reg structs).
// b) Marked as do-not-enregister (dependent promotion).
//
// So here we preserve this invariant and mark any promoted structs as do-not-enreg.
//
if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted)
{
lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum()
DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep));
}
}
#if defined(FEATURE_HW_INTRINSICS)
if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic();
switch (hw->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_SSE_Xor:
case NI_SSE2_Xor:
case NI_AVX_Xor:
case NI_AVX2_Xor:
{
// Transform XOR(X, 0) to X for vectors
GenTree* op1 = hw->Op(1);
GenTree* op2 = hw->Op(2);
if (!gtIsActiveCSE_Candidate(hw))
{
if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op1);
return op2;
}
if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
}
#endif
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARMARCH)
case NI_Vector64_Create:
#endif
{
bool hwAllArgsAreConst = true;
for (GenTree** use : multiOp->UseEdges())
{
if (!(*use)->OperIsConst())
{
hwAllArgsAreConst = false;
break;
}
}
// Avoid unexpected CSE for constant arguments for Vector_.Create
// but only if all arguments are constants.
if (hwAllArgsAreConst)
{
for (GenTree** use : multiOp->UseEdges())
{
(*use)->SetDoNotCSE();
}
}
}
break;
default:
break;
}
}
#endif // defined(FEATURE_HW_INTRINSICS)
#ifdef FEATURE_HW_INTRINSICS
if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase)
{
return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic());
}
#endif
return multiOp;
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b
// (see ECMA III 3.55 and III.3.56).
//
// Arguments:
// tree - The GT_MOD/GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// For ARM64 we don't have a remainder instruction so this transform is
// always done. For XARCH this transform is done if we know that magic
// division will be used, in that case this transform allows CSE to
// eliminate the redundant div from code like "x = a / 3; y = a % 3;".
//
GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
{
JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree));
if (tree->OperGet() == GT_MOD)
{
tree->SetOper(GT_DIV);
}
else if (tree->OperGet() == GT_UMOD)
{
tree->SetOper(GT_UDIV);
}
else
{
noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv");
}
var_types type = tree->gtType;
GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1);
GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2);
GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue);
GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul);
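// At this point the original node has been re-tagged as the DIV; the overall result computes
// "numerator - (numerator / denominator) * denominator".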
// Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul".
//
sub->gtFlags |= GTF_REVERSE_OPS;
#ifdef DEBUG
sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
tree->CheckDivideByConstOptimized(this);
return sub;
}
//------------------------------------------------------------------------
// fgMorphUModToAndSub: Transform a % b into the equivalent a & (b - 1).
// '%' must be unsigned (GT_UMOD).
// 'a' and 'b' must be integers.
// 'b' must be a constant and a power of two.
//
// Arguments:
// tree - The GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// This is more optimized than calling fgMorphModToSubMulDiv.
//
GenTree* Compiler::fgMorphUModToAndSub(GenTreeOp* tree)
{
JITDUMP("\nMorphing UMOD [%06u] to And/Sub\n", dspTreeID(tree));
assert(tree->OperIs(GT_UMOD));
assert(tree->gtOp2->IsIntegralConstUnsignedPow2());
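// For example, unsigned "x % 8" becomes "x & 7".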
const var_types type = tree->TypeGet();
const size_t cnsValue = (static_cast<size_t>(tree->gtOp2->AsIntConCommon()->IntegralValue())) - 1;
GenTree* const newTree = gtNewOperNode(GT_AND, type, tree->gtOp1, gtNewIconNode(cnsValue, type));
INDEBUG(newTree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
DEBUG_DESTROY_NODE(tree->gtOp2);
DEBUG_DESTROY_NODE(tree);
return newTree;
}
//------------------------------------------------------------------------------
// fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree.
//
//
// Arguments:
// oper - Operation to check
//
// Return Value:
// True if the operation can be a root of a bitwise rotation tree; false otherwise.
bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper)
{
return (oper == GT_OR) || (oper == GT_XOR);
}
//------------------------------------------------------------------------------
// fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return
// an equivalent GT_ROL or GT_ROR tree; otherwise, return the original tree.
//
// Arguments:
// tree - tree to check for a rotation pattern
//
// Return Value:
// An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise.
//
// Assumption:
// The input is a GT_OR or a GT_XOR tree.
GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
//
// Check for a rotation pattern, e.g.,
//
// OR ROL
// / \ / \.
// LSH RSZ -> x y
// / \ / \.
// x AND x AND
// / \ / \.
// y 31 ADD 31
// / \.
// NEG 32
// |
// y
// The patterns recognized:
// (x << (y & M)) op (x >>> ((-y + N) & M))
// (x >>> ((-y + N) & M)) op (x << (y & M))
//
// (x << y) op (x >>> (-y + N))
// (x >>> (-y + N)) op (x << y)
//
// (x >>> (y & M)) op (x << ((-y + N) & M))
// (x << ((-y + N) & M)) op (x >>> (y & M))
//
// (x >>> y) op (x << (-y + N))
// (x << (-y + N)) op (x >>> y)
//
// (x << c1) op (x >>> c2)
// (x >>> c1) op (x << c2)
//
// where
// c1 and c2 are const
// c1 + c2 == bitsize(x)
// N == bitsize(x)
// M is const
// M & (N - 1) == N - 1
// op is either | or ^
if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
{
// We can't do anything if the tree has assignments, calls, or volatile
// reads. Note that we allow GTF_EXCEPT side effect since any exceptions
// thrown by the original tree will be thrown by the transformed tree as well.
return nullptr;
}
genTreeOps oper = tree->OperGet();
assert(fgOperIsBitwiseRotationRoot(oper));
// Check if we have an LSH on one side of the OR and an RSZ on the other side.
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
GenTree* leftShiftTree = nullptr;
GenTree* rightShiftTree = nullptr;
if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ))
{
leftShiftTree = op1;
rightShiftTree = op2;
}
else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH))
{
leftShiftTree = op2;
rightShiftTree = op1;
}
else
{
return nullptr;
}
// Check if the trees representing the value to shift are identical.
// We already checked that there are no side effects above.
if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1()))
{
GenTree* rotatedValue = leftShiftTree->gtGetOp1();
var_types rotatedValueActualType = genActualType(rotatedValue->gtType);
ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8;
noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64));
GenTree* leftShiftIndex = leftShiftTree->gtGetOp2();
GenTree* rightShiftIndex = rightShiftTree->gtGetOp2();
// The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits
// shouldn't be masked for the transformation to be valid. If additional
// higher bits are not masked, the transformation is still valid since the result
// of MSIL shift instructions is unspecified if the shift amount is greater or equal
// than the width of the value being shifted.
ssize_t minimalMask = rotatedValueBitSize - 1;
ssize_t leftShiftMask = -1;
ssize_t rightShiftMask = -1;
if ((leftShiftIndex->OperGet() == GT_AND))
{
if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
leftShiftIndex = leftShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if ((rightShiftIndex->OperGet() == GT_AND))
{
if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
rightShiftIndex = rightShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask))
{
// The shift index is overmasked, e.g., we have
// something like (x << y & 15) or
// (x >> (32 - y) & 15) with 32 bit x.
// The transformation is not valid.
return nullptr;
}
GenTree* shiftIndexWithAdd = nullptr;
GenTree* shiftIndexWithoutAdd = nullptr;
genTreeOps rotateOp = GT_NONE;
GenTree* rotateIndex = nullptr;
if (leftShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = leftShiftIndex;
shiftIndexWithoutAdd = rightShiftIndex;
rotateOp = GT_ROR;
}
else if (rightShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = rightShiftIndex;
shiftIndexWithoutAdd = leftShiftIndex;
rotateOp = GT_ROL;
}
if (shiftIndexWithAdd != nullptr)
{
if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI())
{
if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG)
{
if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd))
{
// We found one of these patterns:
// (x << (y & M)) | (x >>> ((-y + N) & M))
// (x << y) | (x >>> (-y + N))
// (x >>> (y & M)) | (x << ((-y + N) & M))
// (x >>> y) | (x << (-y + N))
// where N == bitsize(x), M is const, and
// M & (N - 1) == N - 1
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64))
{
// TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86.
// GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need
// to add helpers for GT_ROL and GT_ROR.
return nullptr;
}
#endif
rotateIndex = shiftIndexWithoutAdd;
}
}
}
}
}
else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
{
if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
// We found this pattern:
// (x << c1) | (x >>> c2)
// where c1 and c2 are const and c1 + c2 == bitsize(x)
rotateOp = GT_ROL;
rotateIndex = leftShiftIndex;
}
}
if (rotateIndex != nullptr)
{
noway_assert(GenTree::OperIsRotate(rotateOp));
GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT;
// We can use the same tree only during global morph; reusing the tree in a later morph
// may invalidate value numbers.
if (fgGlobalMorph)
{
tree->AsOp()->gtOp1 = rotatedValue;
tree->AsOp()->gtOp2 = rotateIndex;
tree->ChangeOper(rotateOp);
unsigned childFlags = 0;
for (GenTree* op : tree->Operands())
{
childFlags |= (op->gtFlags & GTF_ALL_EFFECT);
}
// The parent's flags should be a superset of its operands' flags
noway_assert((inputTreeEffects & childFlags) == childFlags);
}
else
{
tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex);
noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT));
}
return tree;
}
}
return nullptr;
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------------
// fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands.
//
// Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap
// operands if the first one is a constant and the second one is not, even for trees which
// end up not being eligible for long multiplication.
//
// Arguments:
// mul - GT_MUL tree to check for a long multiplication opportunity
//
// Return Value:
// The original tree, with operands possibly swapped, if it is not eligible for long multiplication.
// Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is.
//
GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(mul->TypeIs(TYP_LONG));
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// "IsValidLongMul" and decomposition do not handle constant op1.
if (op1->IsIntegralConst())
{
std::swap(op1, op2);
mul->gtOp1 = op1;
mul->gtOp2 = op2;
}
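    // Bail out if this GT_MUL does not match the long multiply pattern recognized by IsValidLongMul.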
if (!mul->IsValidLongMul())
{
return mul;
}
// MUL_LONG needs to do the work the casts would have done.
mul->ClearUnsigned();
if (op1->IsUnsigned())
{
mul->SetUnsigned();
}
// "IsValidLongMul" returned "true", so this GT_MUL cannot overflow.
mul->ClearOverflow();
mul->Set64RsltMul();
return fgMorphLongMul(mul);
}
//------------------------------------------------------------------------------
// fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT.
//
// Morphs *only* the operands of casts that compose the long mul to
// avoid them being folded away.
//
// Arguments:
// mul - GT_MUL tree to morph operands of
//
// Return Value:
// The original tree, with operands morphed and flags propagated.
//
GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul)
{
INDEBUG(mul->DebugCheckLongMul());
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp());
op1->SetAllEffectsFlags(op1->AsCast()->CastOp());
if (op2->OperIs(GT_CAST))
{
op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp());
op2->SetAllEffectsFlags(op2->AsCast()->CastOp());
}
mul->SetAllEffectsFlags(op1, op2);
op1->SetDoNotCSE();
op2->SetDoNotCSE();
return mul;
}
#endif // !defined(TARGET_64BIT)
/*****************************************************************************
*
* Transform the given tree for code generation and return an equivalent tree.
*/
GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac)
{
assert(tree);
#ifdef DEBUG
if (verbose)
{
if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID)
{
noway_assert(!"JitBreakMorphTree hit");
}
}
#endif
#ifdef DEBUG
int thisMorphNum = 0;
if (verbose && treesBeforeAfterMorph)
{
thisMorphNum = morphNum++;
printf("\nfgMorphTree (before %d):\n", thisMorphNum);
gtDispTree(tree);
}
#endif
if (fgGlobalMorph)
{
// Apply any rewrites for implicit byref arguments before morphing the
// tree.
if (fgMorphImplicitByRefArgs(tree))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum);
gtDispTree(tree);
}
#endif
}
}
/*-------------------------------------------------------------------------
* fgMorphTree() can potentially replace a tree with another, and the
* caller has to store the return value correctly.
     * Turn this on to always make a copy of "tree" here to shake out
* hidden/unupdated references.
*/
#ifdef DEBUG
if (compStressCompile(STRESS_GENERIC_CHECK, 0))
{
GenTree* copy;
if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(GT_ADD, TYP_INT);
}
else
{
copy = new (this, GT_CALL) GenTreeCall(TYP_INT);
}
copy->ReplaceWith(tree, this);
#if defined(LATE_DISASM)
// GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
#endif
DEBUG_DESTROY_NODE(tree);
tree = copy;
}
#endif // DEBUG
if (fgGlobalMorph)
{
/* Ensure that we haven't morphed this node already */
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
/* Before morphing the tree, we try to propagate any active assertions */
if (optLocalAssertionProp)
{
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
GenTree* newTree = tree;
while (newTree != nullptr)
{
tree = newTree;
/* newTree is non-Null if we propagated an assertion */
newTree = optAssertionProp(apFull, tree, nullptr, nullptr);
}
assert(tree != nullptr);
}
}
PREFAST_ASSUME(tree != nullptr);
}
/* Save the original un-morphed tree for fgMorphTreeDone */
GenTree* oldTree = tree;
/* Figure out what kind of a node we have */
unsigned kind = tree->OperKind();
/* Is this a constant node? */
if (tree->OperIsConst())
{
tree = fgMorphConst(tree);
goto DONE;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
tree = fgMorphLeaf(tree);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
tree = fgMorphSmpOp(tree, mac);
goto DONE;
}
/* See what kind of a special operator we have here */
switch (tree->OperGet())
{
case GT_CALL:
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree = fgMorphCall(tree->AsCall());
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
tree = fgMorphMultiOp(tree->AsMultiOp());
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]);
}
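            // Clear GTF_CALL and re-accumulate the side effect flags from the morphed array object and indices.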
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT;
}
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_ARR_OFFSET:
tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset);
tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex);
tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj);
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_PHI:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT;
}
break;
case GT_FIELD_LIST:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
break;
case GT_CMPXCHG:
tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation);
tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue);
tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand);
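            // Clear GTF_EXCEPT and GTF_CALL, then re-accumulate the side effect flags from the morphed operands.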
tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL);
tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT;
break;
case GT_STORE_DYN_BLK:
tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk());
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
noway_assert(!"unexpected operator");
}
DONE:
fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum));
return tree;
}
//------------------------------------------------------------------------
// fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree))
{
/* All dependent assertions are killed here */
ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum));
if (killed)
{
AssertionIndex index = optAssertionCount;
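        // Walk the assertion table from the end, removing every assertion whose bit is set in the killed mask.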
while (killed && (index > 0))
{
if (BitVecOps::IsMember(apTraits, killed, index - 1))
{
#ifdef DEBUG
AssertionDsc* curAssertion = optGetAssertion(index);
noway_assert((curAssertion->op1.lcl.lclNum == lclNum) ||
((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum)));
if (verbose)
{
printf("\nThe assignment ");
printTreeID(tree);
printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum);
optPrintAssertion(curAssertion);
}
#endif
// Remove this bit from the killed mask
BitVecOps::RemoveElemD(apTraits, killed, index - 1);
optAssertionRemove(index);
}
index--;
}
// killed mask should now be zero
noway_assert(BitVecOps::IsEmpty(apTraits, killed));
}
}
//------------------------------------------------------------------------
// fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum.
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
// Notes:
// For structs and struct fields, it will invalidate the children and parent
// respectively.
// Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar.
//
void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree))
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
noway_assert(varTypeIsStruct(varDsc));
// Kill the field locals.
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
fgKillDependentAssertionsSingle(i DEBUGARG(tree));
}
// Kill the struct local itself.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
else if (varDsc->lvIsStructField)
{
// Kill the field local.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
// Kill the parent struct.
fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree));
}
else
{
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
}
/*****************************************************************************
*
* This function is called to complete the morphing of a tree node
* It should only be called once for each node.
* If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated,
* to enforce the invariant that each node is only morphed once.
* If local assertion prop is enabled the result tree may be replaced
* by an equivalent tree.
*
*/
void Compiler::fgMorphTreeDone(GenTree* tree,
GenTree* oldTree /* == NULL */
DEBUGARG(int morphNum))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (after %d):\n", morphNum);
gtDispTree(tree);
printf(""); // in our logic this causes a flush
}
#endif
if (!fgGlobalMorph)
{
return;
}
if ((oldTree != nullptr) && (oldTree != tree))
{
/* Ensure that we have morphed this node */
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!");
#ifdef DEBUG
TransferTestDataToNode(oldTree, tree);
#endif
}
else
{
// Ensure that we haven't morphed this node already
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
}
if (tree->OperIsConst())
{
goto DONE;
}
if (!optLocalAssertionProp)
{
goto DONE;
}
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
/* Is this an assignment to a local variable */
GenTreeLclVarCommon* lclVarTree = nullptr;
// The check below will miss LIR-style assignments.
//
// But we shouldn't be running local assertion prop on these,
// as local prop gets disabled when we run global prop.
assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
// DefinesLocal can return true for some BLK op uses, so
// check what gets assigned only when we're at an assignment.
if (tree->OperIsSsaDef() && tree->DefinesLocal(this, &lclVarTree))
{
unsigned lclNum = lclVarTree->GetLclNum();
noway_assert(lclNum < lvaCount);
fgKillDependentAssertions(lclNum DEBUGARG(tree));
}
}
/* If this tree makes a new assertion - make it available */
optAssertionGen(tree);
DONE:;
#ifdef DEBUG
/* Mark this node as being morphed */
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
//------------------------------------------------------------------------
// fgFoldConditional: try and fold conditionals and optimize BBJ_COND or
// BBJ_SWITCH blocks.
//
// Arguments:
// block - block to examine
//
// Returns:
// FoldResult indicating what changes were made, if any
//
Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
{
FoldResult result = FoldResult::FOLD_DID_NOTHING;
// We don't want to make any code unreachable
//
if (opts.OptimizationDisabled())
{
return result;
}
if (block->bbJumpKind == BBJ_COND)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the jump entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
// block is a BBJ_COND that we are folding the conditional for.
// bTaken is the path that will always be taken from block.
// bNotTaken is the path that will never be taken from block.
//
BasicBlock* bTaken;
BasicBlock* bNotTaken;
if (cond->AsIntCon()->gtIconVal != 0)
{
/* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
bTaken = block->bbJumpDest;
bNotTaken = block->bbNext;
}
else
{
/* Unmark the loop if we are removing a backwards branch */
/* dest block must also be marked as a loop head and */
/* We must be able to reach the backedge block */
if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
fgReachable(block->bbJumpDest, block))
{
optUnmarkLoopBlocks(block->bbJumpDest, block);
}
/* JTRUE 0 - transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
bTaken = block->bbNext;
bNotTaken = block->bbJumpDest;
}
if (fgHaveValidEdgeWeights)
{
// We are removing an edge from block to bNotTaken
// and we have already computed the edge weights, so
// we will try to adjust some of the weights
//
flowList* edgeTaken = fgGetPredForBlock(bTaken, block);
BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block
// We examine the taken edge (block -> bTaken)
// if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
// else if bTaken has valid profile weight and block does not we try to adjust block's weight
// We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken
//
if (block->hasProfileWeight())
{
// The edge weights for (block -> bTaken) are 100% of block's weight
edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken);
if (!bTaken->hasProfileWeight())
{
if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight))
{
// Update the weight of bTaken
bTaken->inheritWeight(block);
bUpdated = bTaken;
}
}
}
else if (bTaken->hasProfileWeight())
{
if (bTaken->countOfInEdges() == 1)
{
// There is only one in edge to bTaken
edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken);
// Update the weight of block
block->inheritWeight(bTaken);
bUpdated = block;
}
}
if (bUpdated != nullptr)
{
weight_t newMinWeight;
weight_t newMaxWeight;
flowList* edge;
// Now fix the weights of the edges out of 'bUpdated'
switch (bUpdated->bbJumpKind)
{
case BBJ_NONE:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
break;
case BBJ_COND:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
FALLTHROUGH;
case BBJ_ALWAYS:
edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
break;
default:
// We don't handle BBJ_SWITCH
break;
}
}
}
/* modify the flow graph */
/* Remove 'block' from the predecessor list of 'bNotTaken' */
fgRemoveRefPred(bNotTaken, block);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
/* if the block was a loop condition we may have to modify
* the loop table */
for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++)
{
/* Some loops may have been already removed by
* loop unrolling or conditional folding */
if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED)
{
continue;
}
/* We are only interested in the loop bottom */
if (optLoopTable[loopNum].lpBottom == block)
{
if (cond->AsIntCon()->gtIconVal == 0)
{
/* This was a bogus loop (condition always false)
* Remove the loop from the table */
optMarkLoopRemoved(loopNum);
optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop"));
#ifdef DEBUG
if (verbose)
{
printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum,
optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum);
}
#endif
}
}
}
}
}
else if (block->bbJumpKind == BBJ_SWITCH)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the switch entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
/* modify the flow graph */
/* Find the actual jump target */
unsigned switchVal;
switchVal = (unsigned)cond->AsIntCon()->gtIconVal;
unsigned jumpCnt;
jumpCnt = block->bbJumpSwt->bbsCount;
BasicBlock** jumpTab;
jumpTab = block->bbJumpSwt->bbsDstTab;
bool foundVal;
foundVal = false;
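            // Keep the edge whose case value matches the switch constant (or the last entry, the default,
            // if no match was found) and remove 'block' as a predecessor of every other target.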
for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
{
BasicBlock* curJump = *jumpTab;
assert(curJump->countOfInEdges() > 0);
// If val matches switchVal or we are at the last entry and
// we never found the switch value then set the new jump dest
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
if (curJump != block->bbNext)
{
/* transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = curJump;
}
else
{
/* transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
}
foundVal = true;
}
else
{
/* Remove 'block' from the predecessor list of 'curJump' */
fgRemoveRefPred(curJump, block);
}
}
assert(foundVal);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphBlockStmt: morph a single statement in a block.
//
// Arguments:
// block - block containing the statement
// stmt - statement to morph
// msg - string to identify caller in a dump
//
// Returns:
// true if 'stmt' was removed from the block.
//    false if 'stmt' is still in the block (even if other statements were removed).
//
// Notes:
// Can be called anytime, unlike fgMorphStmts() which should only be called once.
//
bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg))
{
assert(block != nullptr);
assert(stmt != nullptr);
// Reset some ambient state
fgRemoveRestOfBlock = false;
compCurBB = block;
compCurStmt = stmt;
GenTree* morph = fgMorphTree(stmt->GetRootNode());
// Bug 1106830 - During the CSE phase we can't just remove
// morph->AsOp()->gtOp2 as it could contain CSE expressions.
// This leads to a noway_assert in OptCSE.cpp when
// searching for the removed CSE ref. (using gtFindLink)
//
if (!optValnumCSE_phase)
{
// Check for morph as a GT_COMMA with an unconditional throw
if (fgIsCommaThrow(morph, true))
{
#ifdef DEBUG
if (verbose)
{
printf("Folding a top-level fgIsCommaThrow stmt\n");
printf("Removing op2 as unreachable:\n");
gtDispTree(morph->AsOp()->gtOp2);
printf("\n");
}
#endif
// Use the call as the new stmt
morph = morph->AsOp()->gtOp1;
noway_assert(morph->gtOper == GT_CALL);
}
// we can get a throw as a statement root
if (fgIsThrow(morph))
{
#ifdef DEBUG
if (verbose)
{
printf("We have a top-level fgIsThrow stmt\n");
printf("Removing the rest of block as unreachable:\n");
}
#endif
noway_assert((morph->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
}
stmt->SetRootNode(morph);
// Can the entire tree be removed?
bool removedStmt = false;
// Defer removing statements during CSE so we don't inadvertently remove any CSE defs.
if (!optValnumCSE_phase)
{
removedStmt = fgCheckRemoveStmt(block, stmt);
}
// Or this is the last statement of a conditional branch that was just folded?
if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock)
{
FoldResult const fr = fgFoldConditional(block);
removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT);
}
if (!removedStmt)
{
        // Have to re-do the evaluation order since, for example, some later code does not expect constants as op1
gtSetStmtInfo(stmt);
// Have to re-link the nodes for this statement
fgSetStmtSeq(stmt);
}
#ifdef DEBUG
if (verbose)
{
printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed"));
gtDispTree(morph);
printf("\n");
}
#endif
if (fgRemoveRestOfBlock)
{
// Remove the rest of the stmts in the block
for (Statement* removeStmt : StatementList(stmt->GetNextStmt()))
{
fgRemoveStmt(block, removeStmt);
}
// The rest of block has been removed and we will always throw an exception.
//
        // For compDbgCode, we prepend an empty BB as the firstBB; it is BBJ_NONE.
// We should not convert it to a ThrowBB.
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0))
{
// Convert block to a throw bb
fgConvertBBToThrowBB(block);
}
#ifdef DEBUG
if (verbose)
{
printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum);
}
#endif
fgRemoveRestOfBlock = false;
}
return removedStmt;
}
/*****************************************************************************
*
* Morph the statements of the given block.
* This function should be called just once for a block. Use fgMorphBlockStmt()
* for reentrant calls.
*/
void Compiler::fgMorphStmts(BasicBlock* block)
{
fgRemoveRestOfBlock = false;
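    // Bit vector of outgoing arg temps in use for the current statement; it is cleared after
    // each statement so those temps can be reused.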
fgCurrentlyInUseArgTemps = hashBv::Create(this);
for (Statement* const stmt : block->Statements())
{
if (fgRemoveRestOfBlock)
{
fgRemoveStmt(block, stmt);
continue;
}
#ifdef FEATURE_SIMD
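        // If this is a float assignment from a SIMD field, try to combine it with subsequent
        // field assignments into a single SIMD assignment.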
if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT &&
stmt->GetRootNode()->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
#endif
fgMorphStmt = stmt;
compCurStmt = stmt;
GenTree* oldTree = stmt->GetRootNode();
#ifdef DEBUG
unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0);
if (verbose)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID());
gtDispTree(oldTree);
}
#endif
/* Morph this statement tree */
GenTree* morphedTree = fgMorphTree(oldTree);
// mark any outgoing arg temps as free so we can reuse them in the next statement.
fgCurrentlyInUseArgTemps->ZeroAll();
// Has fgMorphStmt been sneakily changed ?
if ((stmt->GetRootNode() != oldTree) || (block != compCurBB))
{
if (stmt->GetRootNode() != oldTree)
{
                /* This must be a tailcall. Ignore 'morphedTree' and carry on with
the tail-call node */
morphedTree = stmt->GetRootNode();
}
else
{
/* This must be a tailcall that caused a GCPoll to get
injected. We haven't actually morphed the call yet
                   but the flag still got set; clear it here... */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
}
noway_assert(compTailCallUsed);
noway_assert(morphedTree->gtOper == GT_CALL);
GenTreeCall* call = morphedTree->AsCall();
// Could be
// - a fast call made as jmp in which case block will be ending with
// BBJ_RETURN (as we need epilog) and marked as containing a jmp.
// - a tailcall dispatched via JIT helper, on x86, in which case
// block will be ending with BBJ_THROW.
// - a tail call dispatched via runtime help (IL stubs), in which
// case there will not be any tailcall and the block will be ending
// with BBJ_RETURN (as normal control flow)
noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) &&
((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) ||
(call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
(!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN)));
}
#ifdef DEBUG
if (compStressCompile(STRESS_CLONE_EXPR, 30))
{
// Clone all the trees to stress gtCloneExpr()
if (verbose)
{
printf("\nfgMorphTree (stressClone from):\n");
gtDispTree(morphedTree);
}
morphedTree = gtCloneExpr(morphedTree);
noway_assert(morphedTree != nullptr);
if (verbose)
{
printf("\nfgMorphTree (stressClone to):\n");
gtDispTree(morphedTree);
}
}
        /* If the hash value changes, we modified the tree during morphing */
if (verbose)
{
unsigned newHash = gtHashValue(morphedTree);
if (newHash != oldHash)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID());
gtDispTree(morphedTree);
}
}
#endif
/* Check for morphedTree as a GT_COMMA with an unconditional throw */
if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true))
{
/* Use the call as the new stmt */
morphedTree = morphedTree->AsOp()->gtOp1;
noway_assert(morphedTree->gtOper == GT_CALL);
noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
stmt->SetRootNode(morphedTree);
if (fgRemoveRestOfBlock)
{
continue;
}
/* Has the statement been optimized away */
if (fgCheckRemoveStmt(block, stmt))
{
continue;
}
/* Check if this block ends with a conditional branch that can be folded */
if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING)
{
continue;
}
if (ehBlockHasExnFlowDsc(block))
{
continue;
}
}
if (fgRemoveRestOfBlock)
{
if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH))
{
Statement* first = block->firstStmt();
noway_assert(first);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr);
GenTree* last = lastStmt->GetRootNode();
if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) ||
((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH)))
{
GenTree* op1 = last->AsOp()->gtOp1;
if (op1->OperIsCompare())
{
/* Unmark the comparison node with GTF_RELOP_JMP_USED */
op1->gtFlags &= ~GTF_RELOP_JMP_USED;
}
lastStmt->SetRootNode(fgMorphTree(op1));
}
}
/* Mark block as a BBJ_THROW block */
fgConvertBBToThrowBB(block);
}
#if FEATURE_FASTTAILCALL
GenTree* recursiveTailCall = nullptr;
if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall))
{
fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall());
}
#endif
// Reset this back so that it doesn't leak out impacting other blocks
fgRemoveRestOfBlock = false;
}
/*****************************************************************************
*
* Morph the blocks of the method.
 *  The basic block list may be modified in the process.
* This function should be called just once.
*/
void Compiler::fgMorphBlocks()
{
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgMorphBlocks()\n");
}
#endif
/* Since fgMorphTree can be called after various optimizations to re-arrange
     * the nodes, we need a global flag to signal whether we are in the one-pass
* global morphing */
fgGlobalMorph = true;
//
// Local assertion prop is enabled if we are optimized
//
optLocalAssertionProp = opts.OptimizationEnabled();
if (optLocalAssertionProp)
{
//
// Initialize for local assertion prop
//
optAssertionInit(true);
}
if (!compEnregLocals())
{
// Morph is checking if lvDoNotEnregister is already set for some optimizations.
// If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`)
// then we already know that we won't enregister any locals and it is better to set
// this flag before we start reading it.
// The main reason why this flag is not set is that we are running in minOpts.
lvSetMinOptsDoNotEnreg();
}
/*-------------------------------------------------------------------------
* Process all basic blocks in the function
*/
BasicBlock* block = fgFirstBB;
noway_assert(block);
do
{
#ifdef DEBUG
if (verbose)
{
printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName);
}
#endif
if (optLocalAssertionProp)
{
//
// Clear out any currently recorded assertion candidates
// before processing each basic block,
// also we must handle QMARK-COLON specially
//
optAssertionReset(0);
}
// Make the current basic block address available globally.
compCurBB = block;
// Process all statement trees in the basic block.
fgMorphStmts(block);
// Do we need to merge the result of this block into a single return block?
if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0))
{
if ((genReturnBB != nullptr) && (genReturnBB != block))
{
fgMergeBlockReturn(block);
}
}
block = block->bbNext;
} while (block != nullptr);
// We are done with the global morphing phase
fgGlobalMorph = false;
compCurBB = nullptr;
// Under OSR, we no longer need to specially protect the original method entry
//
if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED))
{
JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum);
assert(fgEntryBB->bbRefs > 0);
fgEntryBB->bbRefs--;
// We don't need to remember this block anymore.
fgEntryBB = nullptr;
}
#ifdef DEBUG
if (verboseTrees)
{
fgDispBasicBlocks(true);
}
#endif
}
//------------------------------------------------------------------------
// fgMergeBlockReturn: assign the block return value (if any) into the single return temp
// and branch to the single return block.
//
// Arguments:
// block - the block to process.
//
// Notes:
// A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN.
// For example, a method returning void could have an empty block with jump kind BBJ_RETURN.
// Such blocks can materialize as part of inlining.
//
// A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN.
// It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC.
// For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal
// is BAD_VAR_NUM.
//
void Compiler::fgMergeBlockReturn(BasicBlock* block)
{
assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0));
assert((genReturnBB != nullptr) && (genReturnBB != block));
// TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN.
Statement* lastStmt = block->lastStmt();
GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr;
if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0))
{
// This return was generated during epilog merging, so leave it alone
}
else
{
// We'll jump to the genReturnBB.
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_X86)
if (info.compFlags & CORINFO_FLG_SYNCH)
{
fgConvertSyncReturnToLeave(block);
}
else
#endif // !TARGET_X86
{
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = genReturnBB;
fgAddRefPred(genReturnBB, block);
fgReturnCount--;
}
if (genReturnLocal != BAD_VAR_NUM)
{
// replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal.
// Method must be returning a value other than TYP_VOID.
noway_assert(compMethodHasRetVal());
// This block must be ending with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
noway_assert(ret != nullptr);
// GT_RETURN must have non-null operand as the method is returning the value assigned to
// genReturnLocal
noway_assert(ret->OperGet() == GT_RETURN);
noway_assert(ret->gtGetOp1() != nullptr);
Statement* pAfterStatement = lastStmt;
const DebugInfo& di = lastStmt->GetDebugInfo();
GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block);
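            // The assignment may be a block copy or init (e.g. for struct returns); morph it accordingly.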
if (tree->OperIsCopyBlkOp())
{
tree = fgMorphCopyBlock(tree);
}
else if (tree->OperIsInitBlkOp())
{
tree = fgMorphInitBlock(tree);
}
if (pAfterStatement == lastStmt)
{
lastStmt->SetRootNode(tree);
}
else
{
// gtNewTempAssign inserted additional statements after last
fgRemoveStmt(block, lastStmt);
Statement* newStmt = gtNewStmt(tree, di);
fgInsertStmtAfter(block, pAfterStatement, newStmt);
lastStmt = newStmt;
}
}
else if (ret != nullptr && ret->OperGet() == GT_RETURN)
{
// This block ends with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
// Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn
// block
noway_assert(ret->TypeGet() == TYP_VOID);
noway_assert(ret->gtGetOp1() == nullptr);
fgRemoveStmt(block, lastStmt);
}
JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum);
DISPBLOCK(block);
if (block->hasProfileWeight())
{
weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT;
weight_t const newWeight = oldWeight + block->bbWeight;
JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight,
block->bbNum, genReturnBB->bbNum);
genReturnBB->setBBProfileWeight(newWeight);
DISPBLOCK(genReturnBB);
}
}
}
/*****************************************************************************
*
* Make some decisions about the kind of code to generate.
*/
void Compiler::fgSetOptions()
{
#ifdef DEBUG
/* Should we force fully interruptible code ? */
if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30))
{
noway_assert(!codeGen->isGCTypeFixed());
SetInterruptible(true);
}
#endif
if (opts.compDbgCode)
{
assert(!codeGen->isGCTypeFixed());
SetInterruptible(true); // debugging is easier this way ...
}
/* Assume we won't need an explicit stack frame if this is allowed */
if (compLocallocUsed)
{
codeGen->setFramePointerRequired(true);
}
#ifdef TARGET_X86
if (compTailCallUsed)
codeGen->setFramePointerRequired(true);
#endif // TARGET_X86
if (!opts.genFPopt)
{
codeGen->setFramePointerRequired(true);
}
// Assert that the EH table has been initialized by now. Note that
// compHndBBtabAllocCount never decreases; it is a high-water mark
// of table allocation. In contrast, compHndBBtabCount does shrink
// if we delete a dead EH region, and if it shrinks to zero, the
// table pointer compHndBBtab is unreliable.
assert(compHndBBtabAllocCount >= info.compXcptnsCount);
#ifdef TARGET_X86
// Note: this case, and the !X86 case below, should both use the
// !X86 path. This would require a few more changes for X86 to use
// compHndBBtabCount (the current number of EH clauses) instead of
// info.compXcptnsCount (the number of EH clauses in IL), such as
// in ehNeedsShadowSPslots(). This is because sometimes the IL has
// an EH clause that we delete as statically dead code before we
// get here, leaving no EH clauses left, and thus no requirement
// to use a frame pointer because of EH. But until all the code uses
// the same test, leave info.compXcptnsCount here.
if (info.compXcptnsCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#else // !TARGET_X86
if (compHndBBtabCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#endif // TARGET_X86
#ifdef UNIX_X86_ABI
if (info.compXcptnsCount > 0)
{
assert(!codeGen->isGCTypeFixed());
// Enforce fully interruptible codegen for funclet unwinding
SetInterruptible(true);
}
#endif // UNIX_X86_ABI
if (compMethodRequiresPInvokeFrame())
{
codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
}
if (info.compPublishStubParam)
{
codeGen->setFramePointerRequiredGCInfo(true);
}
if (compIsProfilerHookNeeded())
{
codeGen->setFramePointerRequired(true);
}
if (info.compIsVarArgs)
{
// Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative.
codeGen->setFramePointerRequiredGCInfo(true);
}
if (lvaReportParamTypeArg())
{
codeGen->setFramePointerRequiredGCInfo(true);
}
// printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not");
}
/*****************************************************************************/
GenTree* Compiler::fgInitThisClass()
{
noway_assert(!compIsForInlining());
CORINFO_LOOKUP_KIND kind;
info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
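    // If no runtime lookup is needed, the class handle is known statically and we can use the shared cctor helper.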
if (!kind.needsRuntimeLookup)
{
return fgGetSharedCCtor(info.compClassHnd);
}
else
{
#ifdef FEATURE_READYTORUN
// Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR.
if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI))
{
CORINFO_RESOLVED_TOKEN resolvedToken;
memset(&resolvedToken, 0, sizeof(resolvedToken));
// We are in a shared method body, but maybe we don't need a runtime lookup after all.
// This covers the case of a generic method on a non-generic type.
if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST))
{
resolvedToken.hClass = info.compClassHnd;
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
}
// We need a runtime lookup.
GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
// CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static
// base of the class that owns the method being compiled". If we're in this method, it means we're not
// inlining and there's no ambiguity.
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF,
gtNewCallArgs(ctxTree), &kind);
}
#endif
        // Collectible types require that, for shared generic code, we report any use of the generic context
        // parameter. (This is a conservative approach; we could detect some cases, particularly when the
        // context parameter is 'this', where we don't need the eager reporting logic.)
lvaGenericsContextInUse = true;
switch (kind.runtimeLookupKind)
{
case CORINFO_LOOKUP_THISOBJ:
{
                // This code takes a this pointer, but we need to pass the static method desc to get the right
                // point in the hierarchy
GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
// Vtable pointer of this object
vtTree = gtNewMethodTableLookup(vtTree);
GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd);
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd));
}
case CORINFO_LOOKUP_CLASSPARAM:
{
GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree));
}
case CORINFO_LOOKUP_METHODPARAM:
{
GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
methHndTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID,
gtNewCallArgs(gtNewIconNode(0), methHndTree));
}
default:
noway_assert(!"Unknown LOOKUP_KIND");
UNREACHABLE();
}
}
}
#ifdef DEBUG
/*****************************************************************************
*
* Tree walk callback to make sure no GT_QMARK nodes are present in the tree,
* except for the allowed ? 1 : 0; pattern.
*/
Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data)
{
if ((*tree)->OperGet() == GT_QMARK)
{
fgCheckQmarkAllowedForm(*tree);
}
return WALK_CONTINUE;
}
void Compiler::fgCheckQmarkAllowedForm(GenTree* tree)
{
assert(tree->OperGet() == GT_QMARK);
assert(!"Qmarks beyond morph disallowed.");
}
/*****************************************************************************
*
* Verify that the importer has created GT_QMARK nodes in a way we can
* process them. The following is allowed:
*
* 1. A top level qmark. Top level qmark is of the form:
* a) (bool) ? (void) : (void) OR
* b) V0N = (bool) ? (type) : (type)
*
* 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child
* of either op1 of colon or op2 of colon but not a child of any other
* operator.
*/
void Compiler::fgPreExpandQmarkChecks(GenTree* expr)
{
GenTree* topQmark = fgGetTopLevelQmark(expr);
// If the top level Qmark is null, then scan the tree to make sure
// there are no qmarks within it.
if (topQmark == nullptr)
{
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
else
{
// We could probably expand the cond node also, but don't think the extra effort is necessary,
// so let's just assert the cond node of a top level qmark doesn't have further top level qmarks.
fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2);
}
}
#endif // DEBUG
/*****************************************************************************
*
* Get the top level GT_QMARK node in a given "expr", return NULL if such a
* node is not present. If the top level GT_QMARK node is assigned to a
* GT_LCL_VAR, then return the lcl node in ppDst.
*
*/
GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */)
{
if (ppDst != nullptr)
{
*ppDst = nullptr;
}
GenTree* topQmark = nullptr;
if (expr->gtOper == GT_QMARK)
{
topQmark = expr;
}
else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK &&
expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
topQmark = expr->AsOp()->gtOp2;
if (ppDst != nullptr)
{
*ppDst = expr->AsOp()->gtOp1;
}
}
return topQmark;
}
/*********************************************************************************
*
* For a castclass helper call,
* Importer creates the following tree:
* tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper());
*
* This method splits the qmark expression created by the importer into the
* following blocks: (block, asg, cond1, cond2, helper, remainder)
 * Notice that op1 is the result for both conditions, so we coalesce these
 * assignments into a single block instead of two blocks, which would result in a nested diamond.
*
* +---------->-----------+
* | | |
* ^ ^ v
* | | |
* block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder
*
* We expect to achieve the following codegen:
* mov rsi, rdx tmp = op1 // asgBlock
* test rsi, rsi goto skip if tmp == null ? // cond1Block
* je SKIP
* mov rcx, 0x76543210 cns = op2 // cond2Block
* cmp qword ptr [rsi], rcx goto skip if *tmp == op2
* je SKIP
* call CORINFO_HELP_CHKCASTCLASS_SPECIAL tmp = helper(cns, tmp) // helperBlock
* mov rsi, rax
* SKIP: // remainderBlock
* tmp has the result.
*
*/
void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
{
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
GenTree* expr = stmt->GetRootNode();
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
noway_assert(dst != nullptr);
assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF);
// Get cond, true, false exprs for the qmark.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
// Get cond, true, false exprs for the nested qmark.
GenTree* nestedQmark = falseExpr;
GenTree* cond2Expr;
GenTree* true2Expr;
GenTree* false2Expr;
if (nestedQmark->gtOper == GT_QMARK)
{
cond2Expr = nestedQmark->gtGetOp1();
true2Expr = nestedQmark->gtGetOp2()->AsColon()->ThenNode();
false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode();
}
else
{
        // This is a rare case that arises when we are doing minopts and encounter isinst of null.
        // gtFoldExpr was still able to optimize away part of the tree (but not all of it),
        // which means it does not match our pattern.
// Rather than write code to handle this case, just fake up some nodes to make it match the common
// case. Synthesize a comparison that is always true, and for the result-on-true, use the
// entire subtree we expected to be the nested question op.
cond2Expr = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL));
true2Expr = nestedQmark;
false2Expr = gtNewIconNode(0, TYP_I_IMPL);
}
assert(false2Expr->OperGet() == trueExpr->OperGet());
// Create the chain of blocks. See method header comment.
// The order of blocks after this is the following:
// block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
    // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true);
remainderBlock->bbFlags |= propagateFlags;
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
helperBlock->bbFlags &= ~BBF_INTERNAL;
cond2Block->bbFlags &= ~BBF_INTERNAL;
cond1Block->bbFlags &= ~BBF_INTERNAL;
asgBlock->bbFlags &= ~BBF_INTERNAL;
helperBlock->bbFlags |= BBF_IMPORTED;
cond2Block->bbFlags |= BBF_IMPORTED;
cond1Block->bbFlags |= BBF_IMPORTED;
asgBlock->bbFlags |= BBF_IMPORTED;
}
// Chain the flow correctly.
fgAddRefPred(asgBlock, block);
fgAddRefPred(cond1Block, asgBlock);
fgAddRefPred(cond2Block, cond1Block);
fgAddRefPred(helperBlock, cond2Block);
fgAddRefPred(remainderBlock, helperBlock);
fgAddRefPred(remainderBlock, cond1Block);
fgAddRefPred(remainderBlock, cond2Block);
cond1Block->bbJumpDest = remainderBlock;
cond2Block->bbJumpDest = remainderBlock;
// Set the weights; some are guesses.
asgBlock->inheritWeight(block);
cond1Block->inheritWeight(block);
cond2Block->inheritWeightPercentage(cond1Block, 50);
helperBlock->inheritWeightPercentage(cond2Block, 50);
// Append cond1 as JTRUE to cond1Block
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr);
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond1Block, jmpStmt);
// Append cond2 as JTRUE to cond2Block
jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr);
jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond2Block, jmpStmt);
// AsgBlock should get tmp = op1 assignment.
trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(asgBlock, trueStmt);
// Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper.
gtReverseCond(cond2Expr);
GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr);
Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(helperBlock, helperStmt);
// Finally remove the nested qmark stmt.
fgRemoveStmt(block, stmt);
if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN))
{
fgConvertBBToThrowBB(helperBlock);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand a statement with a top level qmark node. There are three cases, based
* on whether the qmark has both "true" and "false" arms, or just one of them.
*
* S0;
* C ? T : F;
* S1;
*
* Generates ===>
*
* bbj_always
* +---->------+
* false | |
* S0 -->-- ~C -->-- T F -->-- S1
* | |
* +--->--------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? T : NOP;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- ~C -->-- T -->-- S1
* | |
* +-->-------------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? NOP : F;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- C -->-- F -->-- S1
* | |
* +-->------------+
* bbj_cond(true)
*
* If the qmark assigns to a variable, then create tmps for "then"
* and "else" results and assign the temp to the variable as a writeback step.
*/
void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
{
GenTree* expr = stmt->GetRootNode();
// Retrieve the Qmark node to be expanded.
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
if (qmark == nullptr)
{
return;
}
if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF)
{
fgExpandQmarkForCastInstOf(block, stmt);
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
// Retrieve the operands.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
assert(!varTypeIsFloating(condExpr->TypeGet()));
bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP);
bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP);
assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark!
// Create remainder, cond and "else" blocks. After this, the blocks are in this order:
// block ... condBlock ... elseBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
    // if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
condBlock->bbFlags &= ~BBF_INTERNAL;
elseBlock->bbFlags &= ~BBF_INTERNAL;
condBlock->bbFlags |= BBF_IMPORTED;
elseBlock->bbFlags |= BBF_IMPORTED;
}
remainderBlock->bbFlags |= propagateFlags;
condBlock->inheritWeight(block);
fgAddRefPred(condBlock, block);
fgAddRefPred(elseBlock, condBlock);
fgAddRefPred(remainderBlock, elseBlock);
BasicBlock* thenBlock = nullptr;
if (hasTrueExpr && hasFalseExpr)
{
// bbj_always
// +---->------+
// false | |
// S0 -->-- ~C -->-- T F -->-- S1
// | |
// +--->--------+
// bbj_cond(true)
//
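        // Reverse the condition so the branch (taken when the reversed condition is true) jumps to the
        // else block, and fall through into the then block.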
gtReverseCond(condExpr);
condBlock->bbJumpDest = elseBlock;
thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
thenBlock->bbJumpDest = remainderBlock;
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
thenBlock->bbFlags &= ~BBF_INTERNAL;
thenBlock->bbFlags |= BBF_IMPORTED;
}
fgAddRefPred(thenBlock, condBlock);
fgAddRefPred(remainderBlock, thenBlock);
thenBlock->inheritWeightPercentage(condBlock, 50);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasTrueExpr)
{
// false
// S0 -->-- ~C -->-- T -->-- S1
// | |
// +-->-------------+
// bbj_cond(true)
//
gtReverseCond(condExpr);
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
// Since we have no false expr, use the one we'd already created.
thenBlock = elseBlock;
elseBlock = nullptr;
thenBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasFalseExpr)
{
// false
// S0 -->-- C -->-- F -->-- S1
// | |
// +-->------------+
// bbj_cond(true)
//
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1());
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(condBlock, jmpStmt);
// Remove the original qmark statement.
fgRemoveStmt(block, stmt);
    // Since we have a top level qmark, either it is assigned to a local (dst), in which case
    // we assign the true and false expressions to that local, or it is void and no
    // assignment is needed.
unsigned lclNum = BAD_VAR_NUM;
if (dst != nullptr)
{
assert(dst->gtOper == GT_LCL_VAR);
lclNum = dst->AsLclVar()->GetLclNum();
}
else
{
assert(qmark->TypeGet() == TYP_VOID);
}
if (hasTrueExpr)
{
if (dst != nullptr)
{
trueExpr = gtNewTempAssign(lclNum, trueExpr);
}
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(thenBlock, trueStmt);
}
// Assign the falseExpr into the dst or tmp, insert in elseBlock
if (hasFalseExpr)
{
if (dst != nullptr)
{
falseExpr = gtNewTempAssign(lclNum, falseExpr);
}
Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(elseBlock, falseStmt);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand GT_QMARK nodes from the flow graph into basic blocks.
*
*/
void Compiler::fgExpandQmarkNodes()
{
if (compQmarkUsed)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
#ifdef DEBUG
fgPreExpandQmarkChecks(expr);
#endif
fgExpandQmarkStmt(block, stmt);
}
}
#ifdef DEBUG
fgPostExpandQmarkChecks();
#endif
}
compQmarkRationalized = true;
}
#ifdef DEBUG
/*****************************************************************************
*
* Make sure we don't have any more GT_QMARK nodes.
*
*/
void Compiler::fgPostExpandQmarkChecks()
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
}
}
#endif
/*****************************************************************************
*
* Promoting struct locals
*/
void Compiler::fgPromoteStructs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In fgPromoteStructs()\n");
}
#endif // DEBUG
if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE))
{
JITDUMP(" promotion opt flag not enabled\n");
return;
}
if (fgNoStructPromotion)
{
JITDUMP(" promotion disabled by JitNoStructPromotion\n");
return;
}
#if 0
// The code in this #if has been useful in debugging struct promotion issues, by
// allowing selective enabling of the struct promotion optimization according to
// method hash.
#ifdef DEBUG
unsigned methHash = info.compMethodHash();
char* lostr = getenv("structpromohashlo");
unsigned methHashLo = 0;
if (lostr != NULL)
{
sscanf_s(lostr, "%x", &methHashLo);
}
char* histr = getenv("structpromohashhi");
unsigned methHashHi = UINT32_MAX;
if (histr != NULL)
{
sscanf_s(histr, "%x", &methHashHi);
}
if (methHash < methHashLo || methHash > methHashHi)
{
return;
}
else
{
printf("Promoting structs for method %s, hash = 0x%x.\n",
info.compFullName, info.compMethodHash());
printf(""); // in our logic this causes a flush
}
#endif // DEBUG
#endif // 0
if (info.compIsVarArgs)
{
JITDUMP(" promotion disabled because of varargs\n");
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable before fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
// The lvaTable might grow as we grab temps. Make a local copy here.
unsigned startLvaCount = lvaCount;
//
// Loop through the original lvaTable. Looking for struct locals to be promoted.
//
lvaStructPromotionInfo structPromotionInfo;
bool tooManyLocalsReported = false;
// Clear the structPromotionHelper, since it is used during inlining, at which point it
// may be conservative about looking up SIMD info.
// We don't want to preserve those conservative decisions for the actual struct promotion.
structPromotionHelper->Clear();
for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++)
{
// Whether this var got promoted
bool promotedVar = false;
LclVarDsc* varDsc = lvaGetDesc(lclNum);
// If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote
// its fields. Instead, we will attempt to enregister the entire struct.
if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc)))
{
varDsc->lvRegStruct = true;
}
// Don't promote if we have reached the tracking limit.
else if (lvaHaveManyLocals())
{
// Print the message the first time we detect this condition
if (!tooManyLocalsReported)
{
JITDUMP("Stopped promoting struct fields, due to too many locals.\n");
}
tooManyLocalsReported = true;
}
else if (varTypeIsStruct(varDsc))
{
assert(structPromotionHelper != nullptr);
promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum);
}
if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed)
{
// Even if we have not used this in a SIMD intrinsic, if it is not being promoted,
// we will treat it as a reg struct.
varDsc->lvRegStruct = true;
}
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable after fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
}
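//------------------------------------------------------------------------
// fgMorphStructField: see if a GT_FIELD of a local struct can be replaced
//    with a direct reference to the matching promoted (or normed) field local.
//
// Arguments:
//    tree   - the GT_FIELD node
//    parent - the user of 'tree'
//
// Notes:
//    Illustrative sketch only (local numbers are hypothetical): for a promoted
//    struct local V01 whose field at the accessed offset was promoted into V03,
//        FIELD(ADDR(LCL_VAR V01), fldHnd)  ->  LCL_VAR V03
//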
void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_FIELD);
GenTreeField* field = tree->AsField();
GenTree* objRef = field->GetFldObj();
GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr;
noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)));
/* Is this an instance data member? */
if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))
{
unsigned lclNum = obj->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(obj))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = field->gtFldOffset;
unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
if (fieldLclIndex == BAD_VAR_NUM)
{
// Access a promoted struct's field with an offset that doesn't correspond to any field.
// It can happen if the struct was cast to another struct with different offsets.
return;
}
const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex);
var_types fieldType = fieldDsc->TypeGet();
assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type.
if (tree->TypeGet() != fieldType)
{
if (tree->TypeGet() != TYP_STRUCT)
{
// This is going to be an incorrect instruction promotion.
// For example, when we try to read an int as a long.
return;
}
if (field->gtFldHnd != fieldDsc->lvFieldHnd)
{
CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr;
CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass);
CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass);
if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass)
{
// Access the promoted field with a different class handle, can't check that types match.
return;
}
// Access the promoted field as a field of a non-promoted struct with the same class handle.
}
else
{
// As we already checked this above, we must have a tree with a TYP_STRUCT type
//
assert(tree->TypeGet() == TYP_STRUCT);
// The field tree accesses it as a struct, but the promoted LCL_VAR field
// says that it has another type. This happens when struct promotion unwraps
// a single field struct to get to its ultimate type.
//
// Note that currently, we cannot have a promoted LCL_VAR field with a struct type.
//
// This mismatch in types can lead to problems for some parent node type like GT_RETURN.
// So we check the parent node and only allow this optimization when we have
// a GT_ADDR or a GT_ASG.
//
// Note that for a GT_ASG we have to do some additional work,
// see below after the SetOper(GT_LCL_VAR)
//
if (!parent->OperIs(GT_ADDR, GT_ASG))
{
// Don't transform other operations such as GT_RETURN
//
return;
}
#ifdef DEBUG
// This is an additional DEBUG-only sanity check
//
assert(structPromotionHelper != nullptr);
structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType);
#endif // DEBUG
}
}
tree->SetOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(fieldLclIndex);
tree->gtType = fieldType;
tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`.
if (parent->gtOper == GT_ASG)
{
// If we are changing the left side of an assignment, we need to set
// these two flags:
//
if (parent->AsOp()->gtOp1 == tree)
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
// Promotion of struct containing struct fields where the field
// is a struct with a single pointer sized scalar type field: in
// this case struct promotion uses the type of the underlying
// scalar field as the type of struct field instead of recursively
// promoting. This can lead to a case where we have a block-asgn
// with its RHS replaced with a scalar type. Mark RHS value as
// DONT_CSE so that assertion prop will not do const propagation.
// The reason this is required is that if the RHS of a block-asg is a
// constant, then it is incorrectly interpreted as an init-block.
//
// TODO - This can also be avoided if we implement recursive struct
// promotion, tracked by #10019.
if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree))
{
tree->gtFlags |= GTF_DONT_CSE;
}
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex);
}
#endif // DEBUG
}
}
else
{
// Normed struct
// A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if
// the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8
// bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However,
// there is one extremely rare case where that won't be true. An enum type is a special value type
// that contains exactly one element of a primitive integer type (which, for CLS programs, is named
// "value__"). The VM tells us that a local var of that enum type is the primitive type of the
// enum's single field. It turns out that it is legal for IL to access this field using ldflda or
// ldfld. For example:
//
// .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum
// {
// .field public specialname rtspecialname int16 value__
// .field public static literal valuetype mynamespace.e_t one = int16(0x0000)
// }
// .method public hidebysig static void Main() cil managed
// {
// .locals init (valuetype mynamespace.e_t V_0)
// ...
// ldloca.s V_0
// ldflda int16 mynamespace.e_t::value__
// ...
// }
//
// Normally, compilers will not generate the ldflda, since it is superfluous.
//
// In the example, the lclVar is short, but the JIT promotes all trees using this local to the
// "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type
// mismatch like this, don't do this morphing. The local var may end up getting marked as
// address taken, and the appropriate SHORT load will be done from memory in that case.
if (tree->TypeGet() == obj->TypeGet())
{
tree->ChangeOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree->gtFlags &= GTF_NODE_MASK;
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in normed struct with local var V%02u\n", lclNum);
}
#endif // DEBUG
}
}
}
}
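//------------------------------------------------------------------------
// fgMorphLocalField: see if a GT_LCL_FLD on a promoted (or SIMD-typed) struct
//    local can be rewritten as a GT_LCL_VAR of the matching field local.
//
// Arguments:
//    tree   - the GT_LCL_FLD node
//    parent - the user of 'tree'
//
// Notes:
//    Illustrative sketch only (local numbers are hypothetical): if V05 is the
//    promoted field local that lives at the accessed offset of V01 and the
//    sizes match,
//        LCL_FLD int V01 [+4]  ->  LCL_VAR int V05
//    otherwise the struct is marked do-not-enregister so it stays in memory.
//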
void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_LCL_FLD);
unsigned lclNum = tree->AsLclFld()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(varDsc))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = tree->AsLclFld()->GetLclOffs();
unsigned fieldLclIndex = 0;
LclVarDsc* fldVarDsc = nullptr;
if (fldOffset != BAD_VAR_NUM)
{
fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
noway_assert(fieldLclIndex != BAD_VAR_NUM);
fldVarDsc = lvaGetDesc(fieldLclIndex);
}
var_types treeType = tree->TypeGet();
var_types fieldType = fldVarDsc->TypeGet();
if (fldOffset != BAD_VAR_NUM &&
((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1)))
{
// There is an existing sub-field we can use.
tree->AsLclFld()->SetLclNum(fieldLclIndex);
// The field must be an enregisterable type; otherwise it would not be a promoted field.
// The tree type may not match, e.g. for return types that have been morphed, but both
// must be enregisterable types.
assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType));
tree->ChangeOper(GT_LCL_VAR);
assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex);
tree->gtType = fldVarDsc->TypeGet();
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex);
}
else
{
// There is no existing field that has all the parts that we need,
// so we must ensure that the struct lives in memory.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
#ifdef DEBUG
// We can't convert this local to a float because it really does have its
// address taken.
varDsc->lvKeepType = 1;
#endif // DEBUG
}
}
else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc)))
{
assert(tree->AsLclFld()->GetLclOffs() == 0);
tree->gtType = varDsc->TypeGet();
tree->ChangeOper(GT_LCL_VAR);
JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum);
}
}
}
//------------------------------------------------------------------------
// fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs
void Compiler::fgResetImplicitByRefRefCount()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgResetImplicitByRefRefCount()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsImplicitByRef)
{
// Clear the ref count field; fgMarkAddressTakenLocals will increment it per
// appearance of implicit-by-ref param so that call arg morphing can do an
// optimization for single-use implicit-by-ref params whose single use is as
// an outgoing call argument.
varDsc->setLvRefCnt(0, RCS_EARLY);
varDsc->setLvRefCntWtd(0, RCS_EARLY);
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
//------------------------------------------------------------------------
// fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from
// struct to pointer). Also choose (based on address-exposed analysis)
// which struct promotions of implicit byrefs to keep or discard.
// For those which are kept, insert the appropriate initialization code.
// For those which are to be discarded, annotate the promoted field locals
// so that fgMorphImplicitByRefArgs will know to rewrite their appearances
// using indirections off the pointer parameters.
void Compiler::fgRetypeImplicitByRefArgs()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgRetypeImplicitByRefArgs()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
unsigned size;
if (varDsc->lvSize() > REGSIZE_BYTES)
{
size = varDsc->lvSize();
}
else
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
size = info.compCompHnd->getClassSize(typeHnd);
}
if (varDsc->lvPromoted)
{
// This implicit-by-ref was promoted; create a new temp to represent the
// promoted struct before rewriting this parameter as a pointer.
unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref"));
lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true);
if (info.compIsVarArgs)
{
lvaSetStructUsedAsVarArg(newLclNum);
}
// Update varDsc since lvaGrabTemp might have re-allocated the var dsc array.
varDsc = lvaGetDesc(lclNum);
// Copy the struct promotion annotations to the new temp.
LclVarDsc* newVarDsc = lvaGetDesc(newLclNum);
newVarDsc->lvPromoted = true;
newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart;
newVarDsc->lvFieldCnt = varDsc->lvFieldCnt;
newVarDsc->lvContainsHoles = varDsc->lvContainsHoles;
newVarDsc->lvCustomLayout = varDsc->lvCustomLayout;
#ifdef DEBUG
newVarDsc->lvKeepType = true;
#endif // DEBUG
// Propagate address-taken-ness and do-not-enregister-ness.
newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason()));
newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister;
newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
newVarDsc->lvSingleDef = varDsc->lvSingleDef;
newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate;
newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef;
#ifdef DEBUG
newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason());
#endif // DEBUG
// If the promotion is dependent, the promoted temp would just be committed
// to memory anyway, so we'll rewrite its appearances to be indirections
// through the pointer parameter, the same as we'd do for this
// parameter if it weren't promoted at all (otherwise the initialization
// of the new temp would just be a needless memcpy at method entry).
//
// Otherwise, see how many appearances there are. We keep two early ref counts: total
// number of references to the struct or some field, and how many of these are
// arguments to calls. We undo promotion unless we see enough non-call uses.
//
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
const unsigned nonCallAppearances = totalAppearances - callAppearances;
bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ||
(nonCallAppearances <= varDsc->lvFieldCnt));
#ifdef DEBUG
// Above is a profitability heuristic; either value of
// undoPromotion should lead to correct code. So,
// under stress, make different decisions at times.
if (compStressCompile(STRESS_BYREF_PROMOTION, 25))
{
undoPromotion = !undoPromotion;
JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum,
undoPromotion ? "" : "NOT");
}
#endif // DEBUG
JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n",
undoPromotion ? "Undoing" : "Keeping", lclNum,
(lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "",
totalAppearances, nonCallAppearances, varDsc->lvFieldCnt);
if (!undoPromotion)
{
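// The IR built below has the following shape (a sketch; local numbers
// are illustrative):
//     ASG(LCL_VAR<newLclNum> struct, BLK<size>(LCL_VAR<lclNum> byref))
// placed at the start of the scratch entry block, so the promoted temp
// starts out as a copy of the struct that the parameter points to.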
// Insert IR that initializes the temp from the parameter.
// LHS is a simple reference to the temp.
fgEnsureFirstBBisScratch();
GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType);
// RHS is an indirection (using GT_OBJ) off the parameter.
GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF);
GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size));
GenTree* assign = gtNewAssignNode(lhs, rhs);
fgNewStmtAtBeg(fgFirstBB, assign);
}
// Update the locals corresponding to the promoted fields.
unsigned fieldLclStart = varDsc->lvFieldLclStart;
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
if (undoPromotion)
{
// Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs
// will know to rewrite appearances of this local.
assert(fieldVarDsc->lvParentLcl == lclNum);
}
else
{
// Set the new parent.
fieldVarDsc->lvParentLcl = newLclNum;
}
fieldVarDsc->lvIsParam = false;
// The fields shouldn't inherit any register preferences from
// the parameter which is really a pointer to the struct.
fieldVarDsc->lvIsRegArg = false;
fieldVarDsc->lvIsMultiRegArg = false;
fieldVarDsc->SetArgReg(REG_NA);
#if FEATURE_MULTIREG_ARGS
fieldVarDsc->SetOtherArgReg(REG_NA);
#endif
// Promoted fields of implicit byrefs can't be OSR locals.
//
if (fieldVarDsc->lvIsOSRLocal)
{
assert(opts.IsOSR());
fieldVarDsc->lvIsOSRLocal = false;
}
}
// Hijack lvFieldLclStart to record the new temp number.
// It will get fixed up in fgMarkDemotedImplicitByRefArgs.
varDsc->lvFieldLclStart = newLclNum;
// Go ahead and clear lvFieldCnt -- either we're promoting
// a replacement temp or we're not promoting this arg, and
// in either case the parameter is now a pointer that doesn't
// have these fields.
varDsc->lvFieldCnt = 0;
// Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs
// whether references to the struct should be rewritten as
// indirections off the pointer (not promoted) or references
// to the new struct local (promoted).
varDsc->lvPromoted = !undoPromotion;
}
else
{
// The "undo promotion" path above clears lvPromoted for args that struct
// promotion wanted to promote but that aren't considered profitable to
// rewrite. It hijacks lvFieldLclStart to communicate to
// fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left
// on such args for fgMorphImplicitByRefArgs to consult in the interim.
// Here we have an arg that was simply never promoted, so make sure it doesn't
// have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs
// and fgMarkDemotedImplicitByRefArgs.
assert(varDsc->lvFieldLclStart == 0);
}
// Since the parameter in this position is really a pointer, its type is TYP_BYREF.
varDsc->lvType = TYP_BYREF;
// Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF
// make sure that the following flag is not set as these will force SSA to
// exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa)
//
varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it.
// The struct parameter may have had its address taken, but the pointer parameter
// cannot -- any uses of the struct parameter's address are uses of the pointer
// parameter's value, and there's no way for the MSIL to reference the pointer
// parameter's address. So clear the address-taken bit for the parameter.
varDsc->CleanAddressExposed();
varDsc->lvDoNotEnregister = 0;
#ifdef DEBUG
// This should not be converted to a double in stress mode,
// because it is really a pointer
varDsc->lvKeepType = 1;
if (verbose)
{
printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum);
}
#endif // DEBUG
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
//------------------------------------------------------------------------
// fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion
// asked to promote. Appearances of these have now been rewritten
// (by fgMorphImplicitByRefArgs) using indirections from the pointer
// parameter or references to the promotion temp, as appropriate.
void Compiler::fgMarkDemotedImplicitByRefArgs()
{
JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n");
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
JITDUMP("Clearing annotation for V%02d\n", lclNum);
if (varDsc->lvPromoted)
{
// The parameter is simply a pointer now, so clear lvPromoted. It was left set
// by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that
// appearances of this arg needed to be rewritten to a new promoted struct local.
varDsc->lvPromoted = false;
// Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs
// to tell fgMorphImplicitByRefArgs which local is the new promoted struct one.
varDsc->lvFieldLclStart = 0;
}
else if (varDsc->lvFieldLclStart != 0)
{
// We created new temps to represent a promoted struct corresponding to this
// parameter, but decided not to go through with the promotion and have
// rewritten all uses as indirections off the pointer parameter.
// We stashed the pointer to the new struct temp in lvFieldLclStart; make
// note of that and clear the annotation.
unsigned structLclNum = varDsc->lvFieldLclStart;
varDsc->lvFieldLclStart = 0;
// The temp struct is now unused; set flags appropriately so that we
// won't allocate space for it on the stack.
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum);
structVarDsc->CleanAddressExposed();
#ifdef DEBUG
structVarDsc->lvUnusedStruct = true;
structVarDsc->lvUndoneStructPromotion = true;
#endif // DEBUG
unsigned fieldLclStart = structVarDsc->lvFieldLclStart;
unsigned fieldCount = structVarDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum);
// Fix the pointer to the parent local.
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
assert(fieldVarDsc->lvParentLcl == lclNum);
fieldVarDsc->lvParentLcl = structLclNum;
// The field local is now unused; set flags appropriately so that
// we won't allocate stack space for it.
fieldVarDsc->CleanAddressExposed();
}
}
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
/*****************************************************************************
*
* Morph irregular parameters
*    for Windows x64, ARM64 and LoongArch64 this means turning them into byrefs and adding extra indirs.
*/
bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree)
{
#if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64)
return false;
#else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
bool changed = false;
// Implicit byref morphing needs to know if the reference to the parameter is a
// child of GT_ADDR or not, so this method looks one level down and does the
// rewrite whenever a child is a reference to an implicit byref parameter.
if (tree->gtOper == GT_ADDR)
{
if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true);
changed = (morphedTree != nullptr);
assert(!changed || (morphedTree == tree));
}
}
else
{
for (GenTree** pTree : tree->UseEdges())
{
GenTree** pTreeCopy = pTree;
GenTree* childTree = *pTree;
if (childTree->gtOper == GT_LCL_VAR)
{
GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false);
if (newChildTree != nullptr)
{
changed = true;
*pTreeCopy = newChildTree;
}
}
}
}
return changed;
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
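//------------------------------------------------------------------------
// fgMorphImplicitByRefArgs: rewrite one appearance of an implicit by-ref
//    parameter (or its address) now that the parameter is a TYP_BYREF pointer.
//
// Notes:
//    Illustrative rewrites (local numbers are hypothetical; V01 is the
//    parameter, V02 a dependently promoted field of it):
//        ADDR(LCL_VAR V01 struct)  ->  LCL_VAR V01 byref
//        LCL_VAR V01 struct        ->  OBJ(LCL_VAR V01 byref)
//        LCL_VAR V02               ->  FIELD(LCL_VAR V01 byref, f)
//
// Return Value:
//    The updated tree, or nullptr if no rewrite was needed.
//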
GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr)
{
assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)));
assert(isAddr == (tree->gtOper == GT_ADDR));
GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree;
unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum();
LclVarDsc* lclVarDsc = lvaGetDesc(lclNum);
CORINFO_FIELD_HANDLE fieldHnd;
unsigned fieldOffset = 0;
var_types fieldRefType = TYP_UNKNOWN;
if (lvaIsImplicitByRefLocal(lclNum))
{
// The SIMD transformation to coalesce contiguous references to SIMD vector fields will
// re-invoke the traversal to mark address-taken locals.
// So, we may encounter a tree that has already been transformed to TYP_BYREF.
// If we do, leave it as-is.
if (!varTypeIsStruct(lclVarTree))
{
assert(lclVarTree->TypeGet() == TYP_BYREF);
return nullptr;
}
else if (lclVarDsc->lvPromoted)
{
// fgRetypeImplicitByRefArgs created a new promoted struct local to represent this
// arg. Rewrite this to refer to the new local.
assert(lclVarDsc->lvFieldLclStart != 0);
lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart);
return tree;
}
fieldHnd = nullptr;
}
else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl))
{
// This was a field reference to an implicit-by-reference struct parameter that was
// dependently promoted; update it to a field reference off the pointer.
// Grab the field handle from the struct field lclVar.
fieldHnd = lclVarDsc->lvFieldHnd;
fieldOffset = lclVarDsc->lvFldOffset;
assert(fieldHnd != nullptr);
// Update lclNum/lclVarDsc to refer to the parameter
lclNum = lclVarDsc->lvParentLcl;
lclVarDsc = lvaGetDesc(lclNum);
fieldRefType = lclVarTree->TypeGet();
}
else
{
// We only need to transform the 'marked' implicit by-ref parameters
return nullptr;
}
// This is no longer a def of the lclVar, even if it WAS a def of the struct.
lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK);
if (isAddr)
{
if (fieldHnd == nullptr)
{
// change &X into just plain X
tree->ReplaceWith(lclVarTree, this);
tree->gtType = TYP_BYREF;
}
else
{
// change &(X.f) [i.e. GT_ADDR of local for promoted arg field]
// into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param]
lclVarTree->AsLclVarCommon()->SetLclNum(lclNum);
lclVarTree->gtType = TYP_BYREF;
tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset);
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing address of implicit by ref struct parameter with byref:\n");
}
#endif // DEBUG
}
else
{
// Change X into OBJ(X) or FIELD(X, f)
var_types structType = tree->gtType;
tree->gtType = TYP_BYREF;
if (fieldHnd)
{
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset);
}
else
{
tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree);
if (structType == TYP_STRUCT)
{
gtSetObjGcInfo(tree->AsObj());
}
}
// TODO-CQ: If the VM ever stops violating the ABI and passing heap references
// we could remove TGTANYWHERE
tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE);
#ifdef DEBUG
if (verbose)
{
printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n");
}
#endif // DEBUG
}
#ifdef DEBUG
if (verbose)
{
gtDispTree(tree);
}
#endif // DEBUG
return tree;
}
//------------------------------------------------------------------------
// fgAddFieldSeqForZeroOffset:
// Associate a fieldSeq (with a zero offset) with the GenTree node 'addr'
//
// Arguments:
// addr - A GenTree node
// fieldSeqZero - a fieldSeq (with a zero offset)
//
// Notes:
//    Some GenTree nodes have internal fields that record the field sequence.
//    If we have one of these nodes (GT_CNS_INT or GT_LCL_FLD),
//    we can append the field sequence using gtFieldSeq.
//    If we have a GT_ADD of a GT_CNS_INT, we can use the
//    fieldSeq from that child node.
//    Otherwise we record 'fieldSeqZero' against the GenTree node using
//    a map: GetZeroOffsetFieldMap().
//    When doing so we take care to preserve any existing zero-offset field sequence.
//
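//    Example (illustrative): for ADD(addrExpr, CNS_INT 0 [FldSeq 'f']) the
//    zero-offset sequence is appended to the constant's gtFieldSeq; for an
//    address with no such node it is recorded against 'addr' in
//    GetZeroOffsetFieldMap() instead.
//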
void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero)
{
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Tunnel through any commas.
const bool commaOnly = true;
addr = addr->gtEffectiveVal(commaOnly);
// We still expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
FieldSeqNode* fieldSeqUpdate = fieldSeqZero;
GenTree* fieldSeqNode = addr;
bool fieldSeqRecorded = false;
#ifdef DEBUG
if (verbose)
{
printf("\nfgAddFieldSeqForZeroOffset for");
gtDispAnyFieldSeq(fieldSeqZero);
printf("\naddr (Before)\n");
gtDispNode(addr, nullptr, nullptr, false);
gtDispCommonEndLine(addr);
}
#endif // DEBUG
switch (addr->OperGet())
{
case GT_CNS_INT:
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
break;
case GT_ADDR:
if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD)
{
fieldSeqNode = addr->AsOp()->gtOp1;
GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld();
fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero);
lclFld->SetFieldSeq(fieldSeqUpdate);
fieldSeqRecorded = true;
}
break;
case GT_ADD:
if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp1;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp2;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
break;
default:
break;
}
if (fieldSeqRecorded == false)
{
// Record in the general zero-offset map.
// The "addr" node might already be annotated with a zero-offset field sequence.
FieldSeqNode* existingFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq))
{
// Append the zero field sequences
fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero);
}
// Overwrite the field sequence annotation for 'addr'
GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite);
fieldSeqRecorded = true;
}
#ifdef DEBUG
if (verbose)
{
printf(" (After)\n");
gtDispNode(fieldSeqNode, nullptr, nullptr, false);
gtDispCommonEndLine(fieldSeqNode);
}
#endif // DEBUG
}
#ifdef FEATURE_SIMD
//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
//    If the RHS of the input stmt is a read of the X field of a SIMD vector, this
//    function keeps reading the next few stmts, based on the vector size (2, 3 or 4).
//    If the LHS of those stmts are located contiguously, and their RHS are also
//    located contiguously, then we replace those statements with a single copyblk.
//
// Argument:
// block - BasicBlock*. block which stmt belongs to
// stmt - Statement*. the stmt node we want to check
//
// return value:
//    true if this function successfully optimized the stmts; otherwise false.
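//
// Notes:
//    Illustrative sketch (assuming a Vector3 of floats 'v'):
//        dst.x = v.x; dst.y = v.y; dst.z = v.z;
//    appears as three contiguous field assignments and is rewritten below
//    as a single 12-byte SIMD-typed copy from 'v' to 'dst'.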
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt)
{
GenTree* tree = stmt->GetRootNode();
assert(tree->OperGet() == GT_ASG);
GenTree* originalLHS = tree->AsOp()->gtOp1;
GenTree* prevLHS = tree->AsOp()->gtOp1;
GenTree* prevRHS = tree->AsOp()->gtOp2;
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);
if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT)
{
// If the RHS is not a read of field X of a SIMD vector, there is no need to check further.
return false;
}
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
var_types simdType = getSIMDTypeForSize(simdSize);
int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1;
int remainingAssignments = assignmentsCount;
Statement* curStmt = stmt->GetNextStmt();
Statement* lastStmt = stmt;
while (curStmt != nullptr && remainingAssignments > 0)
{
GenTree* exp = curStmt->GetRootNode();
if (exp->OperGet() != GT_ASG)
{
break;
}
GenTree* curLHS = exp->gtGetOp1();
GenTree* curRHS = exp->gtGetOp2();
if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
{
break;
}
remainingAssignments--;
prevLHS = curLHS;
prevRHS = curRHS;
lastStmt = curStmt;
curStmt = curStmt->GetNextStmt();
}
if (remainingAssignments > 0)
{
// If there are assignments remaining, the stmts do not assign to contiguous
// memory locations from the same vector, so we cannot combine them.
return false;
}
#ifdef DEBUG
if (verbose)
{
printf("\nFound contiguous assignments from a SIMD vector to memory.\n");
printf("From " FMT_BB ", stmt ", block->bbNum);
printStmtID(stmt);
printf(" to stmt");
printStmtID(lastStmt);
printf("\n");
}
#endif
for (int i = 0; i < assignmentsCount; i++)
{
fgRemoveStmt(block, stmt->GetNextStmt());
}
GenTree* dstNode;
if (originalLHS->OperIs(GT_LCL_FLD))
{
dstNode = originalLHS;
dstNode->gtType = simdType;
dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField());
// This may have changed a partial local field into full local field
if (dstNode->IsPartialLclFld(this))
{
dstNode->gtFlags |= GTF_VAR_USEASG;
}
else
{
dstNode->gtFlags &= ~GTF_VAR_USEASG;
}
}
else
{
GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
if (simdStructNode->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(simdStructNode);
}
GenTree* copyBlkAddr = copyBlkDst;
if (copyBlkAddr->gtOper == GT_LEA)
{
copyBlkAddr = copyBlkAddr->AsAddrMode()->Base();
}
GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr();
if (localDst != nullptr)
{
setLclRelatedToSIMDIntrinsic(localDst);
}
if (simdStructNode->TypeGet() == TYP_BYREF)
{
assert(simdStructNode->OperIsLocal());
assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum()));
simdStructNode = gtNewIndir(simdType, simdStructNode);
}
else
{
assert(varTypeIsSIMD(simdStructNode));
}
dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst);
}
#ifdef DEBUG
if (verbose)
{
printf("\n" FMT_BB " stmt ", block->bbNum);
printStmtID(stmt);
printf("(before)\n");
gtDispStmt(stmt);
}
#endif
assert(!simdStructNode->CanCSE());
simdStructNode->ClearDoNotCSE();
tree = gtNewAssignNode(dstNode, simdStructNode);
stmt->SetRootNode(tree);
// Since we generated a new address node which didn't exist before,
// we should expose this address manually here.
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all
// local field access into LCL_FLDs, at that point we would be
// combining 2 existing LCL_FLDs or 2 FIELDs that do not reference
// a local and thus cannot result in a new address exposed local.
fgMarkAddressExposedLocals(stmt);
#ifdef DEBUG
if (verbose)
{
printf("\nReplaced " FMT_BB " stmt", block->bbNum);
printStmtID(stmt);
printf("(after)\n");
gtDispStmt(stmt);
}
#endif
return true;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// fgCheckStmtAfterTailCall: check that statements after the tail call stmt
// candidate are in one of the expected forms described below.
//
// Return Value:
// 'true' if stmts are in the expected form, else 'false'.
//
bool Compiler::fgCheckStmtAfterTailCall()
{
// For void calls, we would have created a GT_CALL in the stmt list.
// For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)).
// For calls returning structs, we would have a void call, followed by a void return.
// For debuggable code, it would be an assignment of the call to a temp.
// We want to get rid of any of these extra trees, and just leave
// the call.
Statement* callStmt = fgMorphStmt;
Statement* nextMorphStmt = callStmt->GetNextStmt();
// Check that the remaining stmts in the block are in one of the following patterns:
// 1) ret(void)
// 2) ret(cast*(callResultLclVar))
// 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block
// 4) nop
if (nextMorphStmt != nullptr)
{
GenTree* callExpr = callStmt->GetRootNode();
if (callExpr->gtOper != GT_ASG)
{
// The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar),
// where lclVar was return buffer in the call for structs or simd.
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = retStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
nextMorphStmt = retStmt->GetNextStmt();
}
else
{
noway_assert(callExpr->gtGetOp1()->OperIsLocal());
unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// We can have a chain of assignments from the call result to
// various inline return spill temps. These are ok as long
// as the last one ultimately provides the return value or is ignored.
//
// And if we're returning a small type we may see a cast
// on the source side.
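// For example (an illustrative shape, not the only one accepted):
//     ASG(V05, CALL ...)       ; fgMorphStmt - call result stored to a temp
//     ASG(V06, CAST(V05))      ; inline return spill temp, possibly casted
//     RETURN(V06)              ; or the return may live in another block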
while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP)))
{
if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP))
{
nextMorphStmt = nextMorphStmt->GetNextStmt();
continue;
}
Statement* moveStmt = nextMorphStmt;
GenTree* moveExpr = nextMorphStmt->GetRootNode();
GenTree* moveDest = moveExpr->gtGetOp1();
noway_assert(moveDest->OperIsLocal());
// Tunnel through any casts on the source side.
GenTree* moveSource = moveExpr->gtGetOp2();
while (moveSource->OperIs(GT_CAST))
{
noway_assert(!moveSource->gtOverflow());
moveSource = moveSource->gtGetOp1();
}
noway_assert(moveSource->OperIsLocal());
// Verify we're just passing the value from one local to another
// along the chain.
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum();
noway_assert(srcLclNum == callResultLclNumber);
const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum();
callResultLclNumber = dstLclNum;
nextMorphStmt = moveStmt->GetNextStmt();
}
if (nextMorphStmt != nullptr)
#endif
{
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = nextMorphStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
GenTree* treeWithLcl = retExpr->gtGetOp1();
while (treeWithLcl->gtOper == GT_CAST)
{
noway_assert(!treeWithLcl->gtOverflow());
treeWithLcl = treeWithLcl->gtGetOp1();
}
noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum());
nextMorphStmt = retStmt->GetNextStmt();
}
}
}
return nextMorphStmt == nullptr;
}
//------------------------------------------------------------------------
// fgCanTailCallViaJitHelper: check whether we can use the faster tailcall
// JIT helper on x86.
//
// Return Value:
// 'true' if we can; or 'false' if we should use the generic tailcall mechanism.
//
bool Compiler::fgCanTailCallViaJitHelper()
{
#if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN)
// On anything except Windows x86 we have no faster mechanism available.
return false;
#else
// The JIT helper does not properly handle the case where localloc was used.
if (compLocallocUsed)
return false;
return true;
#endif
}
//------------------------------------------------------------------------
// fgMorphReduceAddOps: reduce successive variable adds into a single multiply,
// e.g., i + i + i + i => i * 4.
//
// Arguments:
// tree - tree for reduction
//
// Return Value:
// reduced tree if pattern matches, original tree otherwise
//
GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree)
{
// ADD(_, V0) starts the pattern match.
if (!tree->OperIs(GT_ADD) || tree->gtOverflow())
{
return tree;
}
#ifndef TARGET_64BIT
// Transforming a 64-bit ADD into a 64-bit MUL on a 32-bit system results in replacing
// the ADD ops with a helper function call. Don't apply the optimization in that case.
if (tree->TypeGet() == TYP_LONG)
{
return tree;
}
#endif
GenTree* lclVarTree = tree->AsOp()->gtOp2;
GenTree* consTree = tree->AsOp()->gtOp1;
GenTree* op1 = consTree;
GenTree* op2 = lclVarTree;
if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2))
{
return tree;
}
int foldCount = 0;
unsigned lclNum = op2->AsLclVarCommon()->GetLclNum();
// Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum).
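// For example (illustrative): ADD(ADD(ADD(V00, V00), V00), V00) yields
// foldCount == 4 and is rewritten below as MUL(V00, 4).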
while (true)
{
// ADD(lclNum, lclNum), end of tree
if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount += 2;
break;
}
// ADD(ADD(X, Y), lclNum), keep descending
else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount++;
op2 = op1->AsOp()->gtOp2;
op1 = op1->AsOp()->gtOp1;
}
// Any other case is a pattern we won't attempt to fold for now.
else
{
return tree;
}
}
// V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize
// accordingly
consTree->BashToConst(foldCount, tree->TypeGet());
GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree);
DEBUG_DESTROY_NODE(tree);
return morphed;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XX
XX Morph XX
XX XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif
#include "allocacheck.h" // for alloca
// Convert the given node into a call to the specified helper passing
// the given argument list.
//
// Tries to fold constants and also adds an edge for the overflow exception.
// Returns the morphed tree.
GenTree* Compiler::fgMorphCastIntoHelper(GenTree* tree, int helper, GenTree* oper)
{
GenTree* result;
/* If the operand is a constant, we'll try to fold it */
if (oper->OperIsConst())
{
GenTree* oldTree = tree;
tree = gtFoldExprConst(tree); // This may not fold the constant (NaN ...)
if (tree != oldTree)
{
return fgMorphTree(tree);
}
else if (tree->OperIsConst())
{
return fgMorphConst(tree);
}
// assert that oper is unchanged and that it is still a GT_CAST node
noway_assert(tree->AsCast()->CastOp() == oper);
noway_assert(tree->gtOper == GT_CAST);
}
result = fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(oper));
assert(result == tree);
return result;
}
/*****************************************************************************
*
* Convert the given node into a call to the specified helper passing
* the given argument list.
*/
GenTree* Compiler::fgMorphIntoHelperCall(GenTree* tree, int helper, GenTreeCall::Use* args, bool morphArgs)
{
// The helper call ought to be semantically equivalent to the original node, so preserve its VN.
tree->ChangeOper(GT_CALL, GenTree::PRESERVE_VN);
GenTreeCall* call = tree->AsCall();
call->gtCallType = CT_HELPER;
call->gtReturnType = tree->TypeGet();
call->gtCallMethHnd = eeFindHelper(helper);
call->gtCallThisArg = nullptr;
call->gtCallArgs = args;
call->gtCallLateArgs = nullptr;
call->fgArgInfo = nullptr;
call->gtRetClsHnd = nullptr;
call->gtCallMoreFlags = GTF_CALL_M_EMPTY;
call->gtInlineCandidateInfo = nullptr;
call->gtControlExpr = nullptr;
call->gtRetBufArg = nullptr;
#ifdef UNIX_X86_ABI
call->gtFlags |= GTF_CALL_POP_ARGS;
#endif // UNIX_X86_ABI
#if DEBUG
// Helper calls are never candidates.
call->gtInlineObservation = InlineObservation::CALLSITE_IS_CALL_TO_HELPER;
call->callSig = nullptr;
#endif // DEBUG
#ifdef FEATURE_READYTORUN
call->gtEntryPoint.addr = nullptr;
call->gtEntryPoint.accessType = IAT_VALUE;
#endif
#if FEATURE_MULTIREG_RET
call->ResetReturnType();
call->ClearOtherRegs();
call->ClearOtherRegFlags();
#ifndef TARGET_64BIT
if (varTypeIsLong(tree))
{
call->InitializeLongReturnType();
}
#endif // !TARGET_64BIT
#endif // FEATURE_MULTIREG_RET
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree->gtFlags |= GTF_CALL;
for (GenTreeCall::Use& use : GenTreeCall::UseList(args))
{
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
/* Perform the morphing */
if (morphArgs)
{
tree = fgMorphArgs(call);
}
return tree;
}
//------------------------------------------------------------------------
// fgMorphExpandCast: Performs the pre-order (required) morphing for a cast.
//
// Performs a rich variety of pre-order transformations (and some optimizations).
//
// Notably:
// 1. Splits long -> small type casts into long -> int -> small type
// for 32 bit targets. Does the same for float/double -> small type
// casts for all targets.
// 2. Morphs casts not supported by the target directly into helpers.
// These mostly have to do with casts from and to floating point
// types, especially checked ones. Refer to the implementation for
// what specific casts need to be handled - it is a complex matrix.
// 3. "Casts away" the GC-ness of a tree (for CAST(nint <- byref)) via
// assigning the GC tree to an inline - COMMA(ASG, LCL_VAR) - non-GC
// temporary.
// 4. "Pushes down" truncating long -> int casts for some operations:
// CAST(int <- MUL(long, long)) => MUL(CAST(int <- long), CAST(int <- long)).
// The purpose of this is to allow "optNarrowTree" in the post-order
// traversal to fold the tree into a TYP_INT one, which helps 32 bit
// targets (and AMD64 too since 32 bit instructions are more compact).
// TODO-Arm64-CQ: Re-evaluate the value of this optimization for ARM64.
//
// Arguments:
// tree - the cast tree to morph
//
// Return Value:
// The fully morphed tree, or "nullptr" if it needs further morphing,
// in which case the cast may be transformed into an unchecked one
// and its operand changed (the cast "expanded" into two).
//
GenTree* Compiler::fgMorphExpandCast(GenTreeCast* tree)
{
GenTree* oper = tree->CastOp();
if (fgGlobalMorph && (oper->gtOper == GT_ADDR))
{
// Make sure we've checked if 'oper' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs will change its type, and we want the cast
// morphing code to see that type.
fgMorphImplicitByRefArgs(oper);
}
var_types srcType = genActualType(oper);
var_types dstType = tree->CastToType();
unsigned dstSize = genTypeSize(dstType);
// See if the cast has to be done in two steps. R -> I
if (varTypeIsFloating(srcType) && varTypeIsIntegral(dstType))
{
if (srcType == TYP_FLOAT
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
// Arm64: src = float, dst is overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& tree->gtOverflow()
#elif defined(TARGET_AMD64)
// Amd64: src = float, dst = uint64 or overflow conversion.
// This goes through helper and hence src needs to be converted to double.
&& (tree->gtOverflow() || (dstType == TYP_ULONG))
#elif defined(TARGET_ARM)
// Arm: src = float, dst = int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType))
#else
// x86: src = float, dst = uint32/int64/uint64 or overflow conversion.
&& (tree->gtOverflow() || varTypeIsLong(dstType) || (dstType == TYP_UINT))
#endif
)
{
oper = gtNewCastNode(TYP_DOUBLE, oper, false, TYP_DOUBLE);
}
// Do we need to do it in two steps R -> I -> smallType?
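// For example (illustrative): CAST(byte <- double) becomes
// CAST(byte <- CAST(int <- double)), so only the R -> int step needs
// direct codegen (or helper) support.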
if (dstSize < genTypeSize(TYP_INT))
{
oper = gtNewCastNodeL(TYP_INT, oper, /* fromUnsigned */ false, TYP_INT);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->AsCast()->CastOp() = oper;
// We must not mistreat the original cast, which was from a floating point type,
// as from an unsigned type, since we now have a TYP_INT node for the source and
// CAST_OVF(BYTE <- INT) != CAST_OVF(BYTE <- UINT).
assert(!tree->IsUnsigned());
}
else
{
if (!tree->gtOverflow())
{
// ARM64 and LoongArch64 optimize all non-overflow checking conversions
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
return nullptr;
#else
switch (dstType)
{
case TYP_INT:
return nullptr;
case TYP_UINT:
#if defined(TARGET_ARM) || defined(TARGET_AMD64)
return nullptr;
#else // TARGET_X86
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT, oper);
#endif // TARGET_X86
case TYP_LONG:
#ifdef TARGET_AMD64
// SSE2 has instructions to convert a float/double directly to a long
return nullptr;
#else // !TARGET_AMD64
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG, oper);
#endif // !TARGET_AMD64
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG, oper);
default:
unreached();
}
#endif // TARGET_ARM64 || TARGET_LOONGARCH64
}
else
{
switch (dstType)
{
case TYP_INT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2INT_OVF, oper);
case TYP_UINT:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2UINT_OVF, oper);
case TYP_LONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2LNG_OVF, oper);
case TYP_ULONG:
return fgMorphCastIntoHelper(tree, CORINFO_HELP_DBL2ULNG_OVF, oper);
default:
unreached();
}
}
}
}
#ifndef TARGET_64BIT
// The code generation phase (for x86 & ARM32) does not handle casts
// directly from [u]long to anything other than [u]int. Insert an
// intermediate cast to native int.
else if (varTypeIsLong(srcType) && varTypeIsSmall(dstType))
{
oper = gtNewCastNode(TYP_I_IMPL, oper, tree->IsUnsigned(), TYP_I_IMPL);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->AsCast()->CastOp() = oper;
}
#endif //! TARGET_64BIT
#ifdef TARGET_ARMARCH
// AArch, unlike x86/amd64, has instructions that can cast directly from
// all integers (except for longs on AArch32 of course) to floats.
// Because there is no IL instruction conv.r4.un, uint/ulong -> float
// casts are always imported as CAST(float <- CAST(double <- uint/ulong)).
// We can eliminate the redundant intermediate cast as an optimization.
else if ((dstType == TYP_FLOAT) && (srcType == TYP_DOUBLE) && oper->OperIs(GT_CAST)
#ifdef TARGET_ARM
&& !varTypeIsLong(oper->AsCast()->CastOp())
#endif
)
{
oper->gtType = TYP_FLOAT;
oper->CastToType() = TYP_FLOAT;
return fgMorphTree(oper);
}
#endif // TARGET_ARMARCH
#ifdef TARGET_ARM
// Convert long/ulong --> float/double casts into helper calls.
else if (varTypeIsFloating(dstType) && varTypeIsLong(srcType))
{
if (dstType == TYP_FLOAT)
{
// there is only a double helper, so we
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
if (tree->gtFlags & GTF_UNSIGNED)
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
#endif // TARGET_ARM
#ifdef TARGET_AMD64
// Do we have to do two step U4/8 -> R4/8 ?
// Codegen supports the following conversion as one-step operation
// a) Long -> R4/R8
// b) U8 -> R8
//
// The following conversions are performed as two-step operations using above.
// U4 -> R4/8 = U4-> Long -> R4/8
// U8 -> R4 = U8 -> R8 -> R4
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
if (dstType == TYP_FLOAT)
{
// Codegen can handle U8 -> R8 conversion.
// U8 -> R4 = U8 -> R8 -> R4
// - change the dsttype to double
// - insert a cast from double to float
// - recurse into the resulting tree
tree->CastToType() = TYP_DOUBLE;
tree->gtType = TYP_DOUBLE;
tree = gtNewCastNode(TYP_FLOAT, tree, false, TYP_FLOAT);
return fgMorphTree(tree);
}
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->ClearUnsigned();
tree->CastOp() = oper;
}
}
#endif // TARGET_AMD64
#ifdef TARGET_X86
// Do we have to do two step U4/8 -> R4/8 ?
else if (tree->IsUnsigned() && varTypeIsFloating(dstType))
{
srcType = varTypeToUnsigned(srcType);
if (srcType == TYP_ULONG)
{
return fgMorphCastIntoHelper(tree, CORINFO_HELP_ULNG2DBL, oper);
}
else if (srcType == TYP_UINT)
{
oper = gtNewCastNode(TYP_LONG, oper, true, TYP_LONG);
oper->gtFlags |= (tree->gtFlags & (GTF_OVERFLOW | GTF_EXCEPT));
tree->gtFlags &= ~GTF_UNSIGNED;
return fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
}
}
else if (((tree->gtFlags & GTF_UNSIGNED) == 0) && (srcType == TYP_LONG) && varTypeIsFloating(dstType))
{
oper = fgMorphCastIntoHelper(tree, CORINFO_HELP_LNG2DBL, oper);
// Since we don't have a Jit Helper that converts to a TYP_FLOAT
// we just use the one that converts to a TYP_DOUBLE
// and then add a cast to TYP_FLOAT
//
if ((dstType == TYP_FLOAT) && (oper->OperGet() == GT_CALL))
{
// Fix the return type to be TYP_DOUBLE
//
oper->gtType = TYP_DOUBLE;
// Add a Cast to TYP_FLOAT
//
tree = gtNewCastNode(TYP_FLOAT, oper, false, TYP_FLOAT);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
else
{
return oper;
}
}
#endif // TARGET_X86
else if (varTypeIsGC(srcType) != varTypeIsGC(dstType))
{
// We are casting away GC information. We would like to just
// change the type to int, but this gives the emitter fits because
// it believes the variable is a GC variable at the beginning of the
// instruction group and it is not turned non-GC by the code generator.
// We fix this by copying the GC pointer to a non-GC pointer temp.
noway_assert(!varTypeIsGC(dstType) && "How can we have a cast to a GCRef here?");
// We generate an assignment to an int and then do the cast from an int. With this we avoid
// the gc problem and we allow casts to bytes, longs, etc...
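// A sketch of the resulting tree (temp number is illustrative):
//     COMMA(ASG(LCL_VAR<tmp> TYP_I_IMPL, oper), CAST(dstType <- LCL_VAR<tmp>))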
unsigned lclNum = lvaGrabTemp(true DEBUGARG("Cast away GC"));
oper->gtType = TYP_I_IMPL;
GenTree* asg = gtNewTempAssign(lclNum, oper);
oper->gtType = srcType;
// do the real cast
GenTree* cast = gtNewCastNode(tree->TypeGet(), gtNewLclvNode(lclNum, TYP_I_IMPL), false, dstType);
// Generate the comma tree
oper = gtNewOperNode(GT_COMMA, tree->TypeGet(), asg, cast);
return fgMorphTree(oper);
}
// Look for narrowing casts ([u]long -> [u]int) and try to push them
// down into the operand before morphing it.
//
// It doesn't matter if this cast is from ulong or long (i.e. if
// GTF_UNSIGNED is set) because the transformation is only applied to
// overflow-insensitive narrowing casts, which always silently truncate.
//
// Note that casts from [u]long to small integer types are handled above.
if ((srcType == TYP_LONG) && ((dstType == TYP_INT) || (dstType == TYP_UINT)))
{
// As a special case, look for overflow-sensitive casts of an AND
// expression, and see if the second operand is a small constant. Since
// the result of an AND is bound by its smaller operand, it may be
// possible to prove that the cast won't overflow, which will in turn
// allow the cast's operand to be transformed.
if (tree->gtOverflow() && (oper->OperGet() == GT_AND))
{
GenTree* andOp2 = oper->AsOp()->gtOp2;
// Look for a constant less than 2^{32} for a cast to uint, or less
// than 2^{31} for a cast to int.
int maxWidth = (dstType == TYP_UINT) ? 32 : 31;
if ((andOp2->OperGet() == GT_CNS_NATIVELONG) && ((andOp2->AsIntConCommon()->LngValue() >> maxWidth) == 0))
{
tree->ClearOverflow();
tree->SetAllEffectsFlags(oper);
}
}
// Only apply this transformation during global morph,
// when neither the cast node nor the oper node may throw an exception
// based on the upper 32 bits.
//
if (fgGlobalMorph && !tree->gtOverflow() && !oper->gtOverflowEx())
{
// For these operations the lower 32 bits of the result only depends
// upon the lower 32 bits of the operands.
//
bool canPushCast = oper->OperIs(GT_ADD, GT_SUB, GT_MUL, GT_AND, GT_OR, GT_XOR, GT_NOT, GT_NEG);
// For long LSH cast to int, there is a discontinuity in behavior
// when the shift amount is 32 or larger.
//
// CAST(INT, LSH(1LL, 31)) == LSH(1, 31)
// LSH(CAST(INT, 1LL), CAST(INT, 31)) == LSH(1, 31)
//
// CAST(INT, LSH(1LL, 32)) == 0
// LSH(CAST(INT, 1LL), CAST(INT, 32)) == LSH(1, 32) == LSH(1, 0) == 1
//
// So some extra validation is needed.
//
if (oper->OperIs(GT_LSH))
{
GenTree* shiftAmount = oper->AsOp()->gtOp2;
// Expose constant value for shift, if possible, to maximize the number
// of cases we can handle.
shiftAmount = gtFoldExpr(shiftAmount);
oper->AsOp()->gtOp2 = shiftAmount;
#if DEBUG
// We may remorph the shift amount tree again later, so clear any morphed flag.
shiftAmount->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
if (shiftAmount->IsIntegralConst())
{
const ssize_t shiftAmountValue = shiftAmount->AsIntCon()->IconValue();
if ((shiftAmountValue >= 64) || (shiftAmountValue < 0))
{
// Shift amount is large enough or negative so result is undefined.
// Don't try to optimize.
assert(!canPushCast);
}
else if (shiftAmountValue >= 32)
{
// We know that we have a narrowing cast ([u]long -> [u]int)
// and that we are casting to a 32-bit value, which will result in zero.
//
// Check to see if we have any side-effects that we must keep
//
if ((tree->gtFlags & GTF_ALL_EFFECT) == 0)
{
// Result of the shift is zero.
DEBUG_DESTROY_NODE(tree);
GenTree* zero = gtNewZeroConNode(TYP_INT);
return fgMorphTree(zero);
}
else // We do have a side-effect
{
// We could create a GT_COMMA node here to keep the side-effect and return a zero
// Instead we just don't try to optimize this case.
canPushCast = false;
}
}
else
{
// Shift amount is positive and small enough that we can push the cast through.
canPushCast = true;
}
}
else
{
// Shift amount is unknown. We can't optimize this case.
assert(!canPushCast);
}
}
if (canPushCast)
{
DEBUG_DESTROY_NODE(tree);
// Insert narrowing casts for op1 and op2.
oper->AsOp()->gtOp1 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp1, false, dstType);
if (oper->AsOp()->gtOp2 != nullptr)
{
oper->AsOp()->gtOp2 = gtNewCastNode(TYP_INT, oper->AsOp()->gtOp2, false, dstType);
}
// Clear the GT_MUL_64RSLT if it is set.
if (oper->gtOper == GT_MUL && (oper->gtFlags & GTF_MUL_64RSLT))
{
oper->gtFlags &= ~GTF_MUL_64RSLT;
}
// The operation now produces a 32-bit result.
oper->gtType = TYP_INT;
// Remorph the new tree as the casts that we added may be folded away.
return fgMorphTree(oper);
}
}
}
return nullptr;
}
#ifdef DEBUG
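//------------------------------------------------------------------------
// getNonStandardArgKindName: Get a printable name for a non-standard arg kind.
//
// Arguments:
//    kind - the NonStandardArgKind value
//
// Return Value:
//    A string naming the kind; used only for dumps.
//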
const char* getNonStandardArgKindName(NonStandardArgKind kind)
{
switch (kind)
{
case NonStandardArgKind::None:
return "None";
case NonStandardArgKind::PInvokeFrame:
return "PInvokeFrame";
case NonStandardArgKind::PInvokeTarget:
return "PInvokeTarget";
case NonStandardArgKind::PInvokeCookie:
return "PInvokeCookie";
case NonStandardArgKind::WrapperDelegateCell:
return "WrapperDelegateCell";
case NonStandardArgKind::ShiftLow:
return "ShiftLow";
case NonStandardArgKind::ShiftHigh:
return "ShiftHigh";
case NonStandardArgKind::FixedRetBuffer:
return "FixedRetBuffer";
case NonStandardArgKind::VirtualStubCell:
return "VirtualStubCell";
case NonStandardArgKind::R2RIndirectionCell:
return "R2RIndirectionCell";
case NonStandardArgKind::ValidateIndirectCallTarget:
return "ValidateIndirectCallTarget";
default:
unreached();
}
}
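//------------------------------------------------------------------------
// fgArgTabEntry::Dump: Print the contents of this argument table entry.
//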
void fgArgTabEntry::Dump() const
{
printf("fgArgTabEntry[arg %u", argNum);
printf(" %d.%s", GetNode()->gtTreeID, GenTree::OpName(GetNode()->OperGet()));
printf(" %s", varTypeName(argType));
printf(" (%s)", passedByRef ? "By ref" : "By value");
if (GetRegNum() != REG_STK)
{
printf(", %u reg%s:", numRegs, numRegs == 1 ? "" : "s");
for (unsigned i = 0; i < numRegs; i++)
{
printf(" %s", getRegName(regNums[i]));
}
}
if (GetStackByteSize() > 0)
{
#if defined(DEBUG_ARG_SLOTS)
printf(", numSlots=%u, slotNum=%u, byteSize=%u, byteOffset=%u", numSlots, slotNum, m_byteSize, m_byteOffset);
#else
printf(", byteSize=%u, byteOffset=%u", m_byteSize, m_byteOffset);
#endif
}
printf(", byteAlignment=%u", m_byteAlignment);
if (isLateArg())
{
printf(", lateArgInx=%u", GetLateArgInx());
}
if (IsSplit())
{
printf(", isSplit");
}
if (needTmp)
{
printf(", tmpNum=V%02u", tmpNum);
}
if (needPlace)
{
printf(", needPlace");
}
if (isTmp)
{
printf(", isTmp");
}
if (processed)
{
printf(", processed");
}
if (IsHfaRegArg())
{
printf(", isHfa(%s)", varTypeName(GetHfaType()));
}
if (isBackFilled)
{
printf(", isBackFilled");
}
if (nonStandardArgKind != NonStandardArgKind::None)
{
printf(", nonStandard[%s]", getNonStandardArgKindName(nonStandardArgKind));
}
if (isStruct)
{
printf(", isStruct");
}
printf("]\n");
}
#endif
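//------------------------------------------------------------------------
// fgArgInfo: Construct an empty fgArgInfo for 'call', with room for 'numArgs' entries.
//
// Arguments:
//    comp    - the compiler instance
//    call    - the call whose arguments are being described
//    numArgs - the number of arguments (used to size the arg table)
//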
fgArgInfo::fgArgInfo(Compiler* comp, GenTreeCall* call, unsigned numArgs)
{
compiler = comp;
callTree = call;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = 0;
#if defined(UNIX_X86_ABI)
alignmentDone = false;
stkSizeBytes = 0;
padStkAlign = 0;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = 0;
#endif
argTableSize = numArgs; // the allocated table size
hasRegArgs = false;
hasStackArgs = false;
argsComplete = false;
argsSorted = false;
needsTemps = false;
if (argTableSize == 0)
{
argTable = nullptr;
}
else
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
}
}
/*****************************************************************************
*
* fgArgInfo Copy Constructor
*
* This method needs to act like a copy constructor for fgArgInfo.
* The newCall needs to have its fgArgInfo initialized such that
* we have newCall that is an exact copy of the oldCall.
* We have to take care since the argument information
* in the argTable contains pointers that must point to the
* new arguments and not the old arguments.
*/
fgArgInfo::fgArgInfo(GenTreeCall* newCall, GenTreeCall* oldCall)
{
fgArgInfo* oldArgInfo = oldCall->AsCall()->fgArgInfo;
compiler = oldArgInfo->compiler;
callTree = newCall;
argCount = 0; // filled in arg count, starts at zero
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
stkLevel = oldArgInfo->stkLevel;
#if defined(UNIX_X86_ABI)
alignmentDone = oldArgInfo->alignmentDone;
stkSizeBytes = oldArgInfo->stkSizeBytes;
padStkAlign = oldArgInfo->padStkAlign;
#endif
#if FEATURE_FIXED_OUT_ARGS
outArgSize = oldArgInfo->outArgSize;
#endif
argTableSize = oldArgInfo->argTableSize;
argsComplete = false;
argTable = nullptr;
assert(oldArgInfo->argsComplete);
if (argTableSize > 0)
{
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argTableSize];
// Copy the old arg entries
for (unsigned i = 0; i < argTableSize; i++)
{
argTable[i] = new (compiler, CMK_fgArgInfo) fgArgTabEntry(*oldArgInfo->argTable[i]);
}
// The copied arg entries contain pointers to old uses, they need
// to be updated to point to new uses.
if (newCall->gtCallThisArg != nullptr)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldCall->gtCallThisArg)
{
argTable[i]->use = newCall->gtCallThisArg;
break;
}
}
}
GenTreeCall::UseIterator newUse = newCall->Args().begin();
GenTreeCall::UseIterator newUseEnd = newCall->Args().end();
GenTreeCall::UseIterator oldUse = oldCall->Args().begin();
GenTreeCall::UseIterator oldUseEnd = oldCall->Args().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->use == oldUse.GetUse())
{
argTable[i]->use = newUse.GetUse();
break;
}
}
}
newUse = newCall->LateArgs().begin();
newUseEnd = newCall->LateArgs().end();
oldUse = oldCall->LateArgs().begin();
oldUseEnd = oldCall->LateArgs().end();
for (; newUse != newUseEnd; ++newUse, ++oldUse)
{
for (unsigned i = 0; i < argTableSize; i++)
{
if (argTable[i]->lateUse == oldUse.GetUse())
{
argTable[i]->lateUse = newUse.GetUse();
break;
}
}
}
}
argCount = oldArgInfo->argCount;
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = oldArgInfo->nextSlotNum;)
nextStackByteOffset = oldArgInfo->nextStackByteOffset;
hasRegArgs = oldArgInfo->hasRegArgs;
hasStackArgs = oldArgInfo->hasStackArgs;
argsComplete = true;
argsSorted = true;
}
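//------------------------------------------------------------------------
// AddArg: Append the given entry to the argument table.
//
// Arguments:
//    curArgTabEntry - the entry to add; the table must have room for it.
//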
void fgArgInfo::AddArg(fgArgTabEntry* curArgTabEntry)
{
assert(argCount < argTableSize);
argTable[argCount] = curArgTabEntry;
argCount++;
}
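//------------------------------------------------------------------------
// AddRegArg: Create and add an entry for an argument passed (at least partially) in registers.
//
// Notes:
//    Only the first register is recorded here; any additional registers
//    are filled in by the caller once the argument layout is known.
//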
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
// Any additional register numbers are set by the caller.
// This is primarily because on ARM we don't yet know if it
// will be split or if it is a double HFA, so the number of registers
// may actually be less.
curArgTabEntry->setRegNum(0, regNum);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
curArgTabEntry->numRegs = numRegs;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->slotNum = 0;
curArgTabEntry->numSlots = 0;
#endif
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(0);
#ifdef TARGET_LOONGARCH64
curArgTabEntry->structFloatFieldType[0] = TYP_UNDEF;
curArgTabEntry->structFloatFieldType[1] = TYP_UNDEF;
#endif
hasRegArgs = true;
if (argCount >= argTableSize)
{
fgArgTabEntry** oldTable = argTable;
argTable = new (compiler, CMK_fgArgInfoPtrArr) fgArgTabEntry*[argCount + 1];
memcpy(argTable, oldTable, argCount * sizeof(fgArgTabEntry*));
argTableSize++;
}
AddArg(curArgTabEntry);
return curArgTabEntry;
}
#if defined(UNIX_AMD64_ABI)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
const bool isStruct,
const bool isFloatHfa,
const bool isVararg,
const regNumber otherRegNum,
const unsigned structIntRegs,
const unsigned structFloatRegs,
const SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* const structDescPtr)
{
fgArgTabEntry* curArgTabEntry =
AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, isFloatHfa, isVararg);
assert(curArgTabEntry != nullptr);
curArgTabEntry->isStruct = isStruct; // is this a struct arg
curArgTabEntry->structIntRegs = structIntRegs;
curArgTabEntry->structFloatRegs = structFloatRegs;
INDEBUG(curArgTabEntry->checkIsStruct();)
assert(numRegs <= 2);
if (numRegs == 2)
{
curArgTabEntry->setRegNum(1, otherRegNum);
}
if (isStruct && structDescPtr != nullptr)
{
curArgTabEntry->structDesc.CopyFrom(*structDescPtr);
}
return curArgTabEntry;
}
#endif // defined(UNIX_AMD64_ABI)
#if defined(TARGET_LOONGARCH64)
fgArgTabEntry* fgArgInfo::AddRegArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
regNumber regNum,
unsigned numRegs,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa, /* unused */
bool isVararg,
const regNumber otherRegNum)
{
fgArgTabEntry* curArgTabEntry =
AddRegArg(argNum, node, use, regNum, numRegs, byteSize, byteAlignment, isStruct, false, isVararg);
assert(curArgTabEntry != nullptr);
INDEBUG(curArgTabEntry->checkIsStruct();)
assert(numRegs <= 2);
if (numRegs == 2)
{
curArgTabEntry->setRegNum(1, otherRegNum);
}
return curArgTabEntry;
}
#endif // defined(TARGET_LOONGARCH64)
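//------------------------------------------------------------------------
// AddStkArg: Create and add an entry for an argument passed entirely on the stack.
//
// Notes:
//    Also advances the running stack offset (nextStackByteOffset) by the
//    argument's size, after aligning it to the argument's required alignment.
//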
fgArgTabEntry* fgArgInfo::AddStkArg(unsigned argNum,
GenTree* node,
GenTreeCall::Use* use,
unsigned numSlots,
unsigned byteSize,
unsigned byteAlignment,
bool isStruct,
bool isFloatHfa,
bool isVararg /*=false*/)
{
fgArgTabEntry* curArgTabEntry = new (compiler, CMK_fgArgInfo) fgArgTabEntry;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, byteAlignment / TARGET_POINTER_SIZE);
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, byteAlignment);
DEBUG_ARG_SLOTS_ASSERT(nextStackByteOffset / TARGET_POINTER_SIZE == nextSlotNum);
curArgTabEntry->setRegNum(0, REG_STK);
curArgTabEntry->argNum = argNum;
curArgTabEntry->argType = node->TypeGet();
curArgTabEntry->use = use;
curArgTabEntry->lateUse = nullptr;
#if defined(DEBUG_ARG_SLOTS)
curArgTabEntry->numSlots = numSlots;
curArgTabEntry->slotNum = nextSlotNum;
#endif
curArgTabEntry->numRegs = 0;
#if defined(UNIX_AMD64_ABI)
curArgTabEntry->structIntRegs = 0;
curArgTabEntry->structFloatRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
curArgTabEntry->SetLateArgInx(UINT_MAX);
curArgTabEntry->tmpNum = BAD_VAR_NUM;
curArgTabEntry->SetSplit(false);
curArgTabEntry->isTmp = false;
curArgTabEntry->needTmp = false;
curArgTabEntry->needPlace = false;
curArgTabEntry->processed = false;
if (GlobalJitOptions::compFeatureHfa)
{
curArgTabEntry->SetHfaElemKind(CORINFO_HFA_ELEM_NONE);
}
curArgTabEntry->isBackFilled = false;
curArgTabEntry->nonStandardArgKind = NonStandardArgKind::None;
curArgTabEntry->isStruct = isStruct;
curArgTabEntry->SetIsVararg(isVararg);
curArgTabEntry->SetByteAlignment(byteAlignment);
curArgTabEntry->SetByteSize(byteSize, isStruct, isFloatHfa);
curArgTabEntry->SetByteOffset(nextStackByteOffset);
hasStackArgs = true;
AddArg(curArgTabEntry);
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
nextStackByteOffset += curArgTabEntry->GetByteSize();
return curArgTabEntry;
}
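//------------------------------------------------------------------------
// RemorphReset: Reset the running stack offset so the arguments can be laid out again when remorphing.
//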
void fgArgInfo::RemorphReset()
{
DEBUG_ARG_SLOTS_ONLY(nextSlotNum = INIT_ARG_STACK_SLOT;)
nextStackByteOffset = INIT_ARG_STACK_SLOT * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// UpdateRegArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be at least partially passed in registers.
//
void fgArgInfo::UpdateRegArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
assert(curArgTabEntry->numRegs != 0);
assert(curArgTabEntry->use->GetNode() == node);
}
//------------------------------------------------------------------------
// UpdateStkArg: Update the given fgArgTabEntry while morphing.
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry to update.
// node - the tree node that defines the argument
// reMorphing - a boolean value indicating whether we are remorphing the call
//
// Assumptions:
// This must have already been determined to be passed on the stack.
//
void fgArgInfo::UpdateStkArg(fgArgTabEntry* curArgTabEntry, GenTree* node, bool reMorphing)
{
bool isLateArg = curArgTabEntry->isLateArg();
// If this is a late arg, we'd better be updating it with a correctly marked node, and vice-versa.
assert((isLateArg && ((node->gtFlags & GTF_LATE_ARG) != 0)) ||
(!isLateArg && ((node->gtFlags & GTF_LATE_ARG) == 0)));
noway_assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert((curArgTabEntry->GetRegNum() == REG_STK) || curArgTabEntry->IsSplit());
assert(curArgTabEntry->use->GetNode() == node);
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
nextSlotNum = roundUp(nextSlotNum, curArgTabEntry->GetByteAlignment() / TARGET_POINTER_SIZE);
assert(curArgTabEntry->slotNum == nextSlotNum);
nextSlotNum += curArgTabEntry->numSlots;
}
#endif
nextStackByteOffset = roundUp(nextStackByteOffset, curArgTabEntry->GetByteAlignment());
assert(curArgTabEntry->GetByteOffset() == nextStackByteOffset);
nextStackByteOffset += curArgTabEntry->GetStackByteSize();
}
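//------------------------------------------------------------------------
// SplitArg: Record that the argument at 'argNum' is split between registers and stack.
//
// Arguments:
//    argNum   - the argument number (position in the original arg list)
//    numRegs  - the number of registers used by the argument
//    numSlots - the number of stack slots used by the argument
//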
void fgArgInfo::SplitArg(unsigned argNum, unsigned numRegs, unsigned numSlots)
{
fgArgTabEntry* curArgTabEntry = nullptr;
assert(argNum < argCount);
for (unsigned inx = 0; inx < argCount; inx++)
{
curArgTabEntry = argTable[inx];
if (curArgTabEntry->argNum == argNum)
{
break;
}
}
assert(numRegs > 0);
assert(numSlots > 0);
if (argsComplete)
{
assert(curArgTabEntry->IsSplit() == true);
assert(curArgTabEntry->numRegs == numRegs);
DEBUG_ARG_SLOTS_ONLY(assert(curArgTabEntry->numSlots == numSlots);)
assert(hasStackArgs == true);
}
else
{
curArgTabEntry->SetSplit(true);
curArgTabEntry->numRegs = numRegs;
DEBUG_ARG_SLOTS_ONLY(curArgTabEntry->numSlots = numSlots;)
curArgTabEntry->SetByteOffset(0);
hasStackArgs = true;
}
DEBUG_ARG_SLOTS_ONLY(nextSlotNum += numSlots;)
// TODO-Cleanup: structs are aligned to 8 bytes on arm64 apple, so it would work, but pass the precise size.
nextStackByteOffset += numSlots * TARGET_POINTER_SIZE;
}
//------------------------------------------------------------------------
// EvalToTmp: Replace the node in the given fgArgTabEntry with a temp
//
// Arguments:
// curArgTabEntry - the fgArgTabEntry for the argument
// tmpNum - the varNum for the temp
// newNode - the assignment of the argument value to the temp
//
// Notes:
// Although the name of this method is EvalToTmp, it doesn't actually create
// the temp or the copy.
//
void fgArgInfo::EvalToTmp(fgArgTabEntry* curArgTabEntry, unsigned tmpNum, GenTree* newNode)
{
assert(curArgTabEntry->use != callTree->gtCallThisArg);
assert(curArgTabEntry->use->GetNode() == newNode);
assert(curArgTabEntry->GetNode() == newNode);
curArgTabEntry->tmpNum = tmpNum;
curArgTabEntry->isTmp = true;
}
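//------------------------------------------------------------------------
// ArgsComplete: Finalize the argument table after all arguments have been added.
//
// Notes:
//    Decides which arguments must be evaluated into temps (because of
//    assignments, calls, or other side effects) and which stack arguments
//    need placeholder nodes, setting 'needTmp'/'needPlace' accordingly.
//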
void fgArgInfo::ArgsComplete()
{
bool hasStructRegArg = false;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
if (curArgTabEntry->GetRegNum() == REG_STK)
{
assert(hasStackArgs == true);
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we use push instructions to pass arguments:
// The non-register arguments are evaluated and pushed in order
// and they are never evaluated into temps
//
continue;
#endif
}
#if FEATURE_ARG_SPLIT
else if (curArgTabEntry->IsSplit())
{
hasStructRegArg = true;
assert(hasStackArgs == true);
}
#endif // FEATURE_ARG_SPLIT
else // we have a register argument, next we look for a struct type.
{
if (varTypeIsStruct(argx) UNIX_AMD64_ABI_ONLY(|| curArgTabEntry->isStruct))
{
hasStructRegArg = true;
}
}
/* If the argument tree contains an assignment (GTF_ASG) then the argument and
every earlier argument (except constants) must be evaluated into temps
since there may be other arguments that follow and they may use the value being assigned.
EXAMPLE: ArgTab is "a, a=5, a"
-> when we see the second arg "a=5"
we know the first two arguments "a, a=5" have to be evaluated into temps
For the case of an assignment, we only know that there exists some assignment somewhere
in the tree. We don't know what is being assigned so we are very conservative here
and assume that any local variable could have been assigned.
*/
if (argx->gtFlags & GTF_ASG)
{
// If this is not the only argument, or it's a copyblk, or it already evaluates the expression to
// a tmp, then we need a temp in the late arg list.
if ((argCount > 1) || argx->OperIsCopyBlkOp()
#ifdef FEATURE_FIXED_OUT_ARGS
|| curArgTabEntry->isTmp // I protect this by "FEATURE_FIXED_OUT_ARGS" to preserve the property
// that we only have late non-register args when that feature is on.
#endif // FEATURE_FIXED_OUT_ARGS
)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// For all previous arguments, unless they are a simple constant
// we require that they be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
if (!prevArgTabEntry->GetNode()->IsInvariant())
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
bool treatLikeCall = ((argx->gtFlags & GTF_CALL) != 0);
#if FEATURE_FIXED_OUT_ARGS
// Like calls, if this argument has a tree that will do an inline throw
// (a call to a jit helper), then we need to treat it like a call (but only
// if there are/were any stack args).
// This means unnesting, sorting, etc. Technically this is overly
// conservative, but I want to avoid as much special-case debug-only code
// as possible, so leveraging the GTF_CALL flag is the easiest.
//
if (!treatLikeCall && (argx->gtFlags & GTF_EXCEPT) && (argCount > 1) && compiler->opts.compDbgCode &&
(compiler->fgWalkTreePre(&argx, Compiler::fgChkThrowCB) == Compiler::WALK_ABORT))
{
for (unsigned otherInx = 0; otherInx < argCount; otherInx++)
{
if (otherInx == curInx)
{
continue;
}
if (argTable[otherInx]->GetRegNum() == REG_STK)
{
treatLikeCall = true;
break;
}
}
}
#endif // FEATURE_FIXED_OUT_ARGS
/* If it contains a call (GTF_CALL) then itself and everything before the call
with a GLOB_EFFECT must eval to temp (this is because everything with SIDE_EFFECT
has to be kept in the right order since we will move the call to the first position)
For calls we don't have to be quite as conservative as we are with an assignment
since the call won't be modifying any non-address taken LclVars.
*/
if (treatLikeCall)
{
if (argCount > 1) // If this is not the only argument
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else if (varTypeIsFloating(argx->TypeGet()) && (argx->OperGet() == GT_CALL))
{
// Spill all arguments that are floating point calls
curArgTabEntry->needTmp = true;
needsTemps = true;
}
// All previous arguments may need to be evaluated into temps
for (unsigned prevInx = 0; prevInx < curInx; prevInx++)
{
fgArgTabEntry* prevArgTabEntry = argTable[prevInx];
assert(prevArgTabEntry->argNum < curArgTabEntry->argNum);
// For all previous arguments, if they have any GTF_ALL_EFFECT
// we require that they be evaluated into a temp
if ((prevArgTabEntry->GetNode()->gtFlags & GTF_ALL_EFFECT) != 0)
{
prevArgTabEntry->needTmp = true;
needsTemps = true;
}
#if FEATURE_FIXED_OUT_ARGS
// Or, if they are stored into the FIXED_OUT_ARG area
// we require that they be moved to the gtCallLateArgs
// and replaced with a placeholder node
else if (prevArgTabEntry->GetRegNum() == REG_STK)
{
prevArgTabEntry->needPlace = true;
}
#if FEATURE_ARG_SPLIT
else if (prevArgTabEntry->IsSplit())
{
prevArgTabEntry->needPlace = true;
}
#endif // FEATURE_ARG_SPLIT
#endif
}
}
#if FEATURE_MULTIREG_ARGS
// For RyuJIT backend we will expand a Multireg arg into a GT_FIELD_LIST
// with multiple indirections, so here we consider spilling it into a tmp LclVar.
//
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
bool isMultiRegArg =
(curArgTabEntry->numRegs > 0) && (curArgTabEntry->numRegs + curArgTabEntry->GetStackSlotsNumber() > 1);
#else
bool isMultiRegArg = (curArgTabEntry->numRegs > 1);
#endif
if ((varTypeIsStruct(argx->TypeGet())) && (curArgTabEntry->needTmp == false))
{
if (isMultiRegArg && ((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0))
{
// Spill multireg struct arguments that have Assignments or Calls embedded in them
curArgTabEntry->needTmp = true;
needsTemps = true;
}
else
{
// We call gtPrepareCost to measure the cost of evaluating this tree
compiler->gtPrepareCost(argx);
if (isMultiRegArg && (argx->GetCostEx() > (6 * IND_COST_EX)))
{
// Spill multireg struct arguments that are expensive to evaluate twice
curArgTabEntry->needTmp = true;
needsTemps = true;
}
#if defined(FEATURE_SIMD) && defined(TARGET_ARM64)
else if (isMultiRegArg && varTypeIsSIMD(argx->TypeGet()))
{
// SIMD types do not need the optimization below due to their sizes
if (argx->OperIsSimdOrHWintrinsic() ||
(argx->OperIs(GT_OBJ) && argx->AsObj()->gtOp1->OperIs(GT_ADDR) &&
argx->AsObj()->gtOp1->AsOp()->gtOp1->OperIsSimdOrHWintrinsic()))
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
#endif
#ifndef TARGET_ARM
// TODO-Arm: This optimization is not implemented for ARM32
// so we skip this for ARM32 until it is ported to use RyuJIT backend
//
else if (argx->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argx->AsObj();
unsigned structSize = argObj->GetLayout()->GetSize();
switch (structSize)
{
case 3:
case 5:
case 6:
case 7:
// If we have a stack based LclVar we can perform a wider read of 4 or 8 bytes
//
if (argObj->AsObj()->gtOp1->IsLocalAddrExpr() == nullptr) // Is the source not a LclVar?
{
// If we don't have a LclVar we need to read exactly 3,5,6 or 7 bytes
// For now we use a GT_CPBLK to copy the exact size into a GT_LCL_VAR temp.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
}
break;
case 11:
case 13:
case 14:
case 15:
// Spill any GT_OBJ multireg structs that are difficult to extract
//
// When we have a GT_OBJ of a struct with the above sizes we would need
// to use 3 or 4 load instructions to load the exact size of this struct.
// Instead we spill the GT_OBJ into a new GT_LCL_VAR temp and this sequence
// will use a GT_CPBLK to copy the exact size into the GT_LCL_VAR temp.
// Then we can just load all 16 bytes of the GT_LCL_VAR temp when passing
// the argument.
//
curArgTabEntry->needTmp = true;
needsTemps = true;
break;
default:
break;
}
}
#endif // !TARGET_ARM
}
}
#endif // FEATURE_MULTIREG_ARGS
}
// We only care because we can't spill structs and qmarks involve a lot of spilling, but
// if we don't have qmarks, then it doesn't matter.
// So check for QMARKs globally once here, instead of inside the loop.
//
const bool hasStructRegArgWeCareAbout = (hasStructRegArg && compiler->compQmarkUsed);
#if FEATURE_FIXED_OUT_ARGS
// For Arm/x64 we only care because we can't reorder a register
// argument that uses GT_LCLHEAP. This is an optimization to
// save a check inside the below loop.
//
const bool hasStackArgsWeCareAbout = (hasStackArgs && compiler->compLocallocUsed);
#else
const bool hasStackArgsWeCareAbout = hasStackArgs;
#endif // FEATURE_FIXED_OUT_ARGS
// If we have any stack args we have to force the evaluation
// of any arguments passed in registers that might throw an exception
//
// Technically we are only required to handle the following two cases:
// a GT_IND with GTF_IND_RNGCHK (only on x86) or
// a GT_LCLHEAP node that allocates stuff on the stack
//
if (hasStackArgsWeCareAbout || hasStructRegArgWeCareAbout)
{
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry != nullptr);
GenTree* argx = curArgTabEntry->GetNode();
// Examine the register args that are currently not marked needTmp
//
if (!curArgTabEntry->needTmp && (curArgTabEntry->GetRegNum() != REG_STK))
{
if (hasStackArgsWeCareAbout)
{
#if !FEATURE_FIXED_OUT_ARGS
// On x86 we previously recorded a stack depth of zero when
// morphing the register arguments of any GT_IND with a GTF_IND_RNGCHK flag
// Thus we can not reorder the argument after any stack based argument
// (Note that GT_LCLHEAP sets the GTF_EXCEPT flag so we don't need to
// check for it explicitly.)
//
if (argx->gtFlags & GTF_EXCEPT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
#else
// For Arm/X64 we can't reorder a register argument that uses a GT_LCLHEAP
//
if (argx->gtFlags & GTF_EXCEPT)
{
assert(compiler->compLocallocUsed);
// Returns WALK_ABORT if a GT_LCLHEAP node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkLocAllocCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
#endif
}
if (hasStructRegArgWeCareAbout)
{
// Returns true if a GT_QMARK node is encountered in the argx tree
//
if (compiler->fgWalkTreePre(&argx, Compiler::fgChkQmarkCB) == Compiler::WALK_ABORT)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
continue;
}
}
}
}
}
// When CFG is enabled and this is a delegate call or vtable call we must
// compute the call target before all late args. However this will
// effectively null-check 'this', which should happen only after all
// arguments are evaluated. Thus we must evaluate all args with side
// effects to a temp.
if (compiler->opts.IsCFGEnabled() && (callTree->IsVirtualVtable() || callTree->IsDelegateInvoke()))
{
// Always evaluate 'this' to temp.
argTable[0]->needTmp = true;
needsTemps = true;
for (unsigned curInx = 1; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
GenTree* arg = curArgTabEntry->GetNode();
if ((arg->gtFlags & GTF_ALL_EFFECT) != 0)
{
curArgTabEntry->needTmp = true;
needsTemps = true;
}
}
}
argsComplete = true;
}
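//------------------------------------------------------------------------
// SortArgs: Sort the argument table so that more complex arguments are evaluated first.
//
// Notes:
//    See the in-line comment below for the resulting table layout.
//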
void fgArgInfo::SortArgs()
{
assert(argsComplete == true);
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nSorting the arguments:\n");
}
#endif
/* Shuffle the arguments around before we build the gtCallLateArgs list.
The idea is to move all "simple" arguments like constants and local vars
to the end of the table, and move the complex arguments towards the beginning
of the table. This will help prevent registers from being spilled by
allowing us to evaluate the more complex arguments before the simpler arguments.
The argTable ends up looking like:
+------------------------------------+ <--- argTable[argCount - 1]
| constants |
+------------------------------------+
| local var / local field |
+------------------------------------+
| remaining arguments sorted by cost |
+------------------------------------+
| temps (argTable[].needTmp = true) |
+------------------------------------+
| args with calls (GTF_CALL) |
+------------------------------------+ <--- argTable[0]
*/
/* Set the beginning and end for the new argument table */
unsigned curInx;
int regCount = 0;
unsigned begTab = 0;
unsigned endTab = argCount - 1;
unsigned argsRemaining = argCount;
// First take care of arguments that are constants.
// [We use a backward iterator pattern]
//
curInx = argCount;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
regCount++;
}
assert(curArgTabEntry->lateUse == nullptr);
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put constants at the end of the table
//
if (argx->gtOper == GT_CNS_INT)
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > 0);
if (argsRemaining > 0)
{
// Next take care of arguments that are calls.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// put calls at the beginning of the table
//
if (argx->gtFlags & GTF_CALL)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of arguments that are temps.
// These temps come before the arguments that are
// ordinary local vars or local fields
// since this will give them a better chance to become
// enregistered into their actual argument register.
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
if (curArgTabEntry->needTmp)
{
curArgTabEntry->processed = true;
// place curArgTabEntry at the begTab position by performing a swap
//
if (curInx != begTab)
{
argTable[curInx] = argTable[begTab];
argTable[begTab] = curArgTabEntry;
}
begTab++;
argsRemaining--;
}
}
}
}
if (argsRemaining > 0)
{
// Next take care of local var and local field arguments.
// These are moved towards the end of the argument evaluation.
// [We use a backward iterator pattern]
//
curInx = endTab + 1;
do
{
curInx--;
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
if ((argx->gtOper == GT_LCL_VAR) || (argx->gtOper == GT_LCL_FLD))
{
noway_assert(curInx <= endTab);
curArgTabEntry->processed = true;
// place curArgTabEntry at the endTab position by performing a swap
//
if (curInx != endTab)
{
argTable[curInx] = argTable[endTab];
argTable[endTab] = curArgTabEntry;
}
endTab--;
argsRemaining--;
}
}
} while (curInx > begTab);
}
// Finally, take care of all the remaining arguments.
// Note that we fill in one arg at a time using a while loop.
bool costsPrepared = false; // Only prepare tree costs once, the first time through this loop
while (argsRemaining > 0)
{
/* Find the most expensive arg remaining and evaluate it next */
fgArgTabEntry* expensiveArgTabEntry = nullptr;
unsigned expensiveArg = UINT_MAX;
unsigned expensiveArgCost = 0;
// [We use a forward iterator pattern]
//
for (curInx = begTab; curInx <= endTab; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
// Skip any already processed args
//
if (!curArgTabEntry->processed)
{
GenTree* argx = curArgTabEntry->GetNode();
// We should have already handled these kinds of args
assert(argx->gtOper != GT_LCL_VAR);
assert(argx->gtOper != GT_LCL_FLD);
assert(argx->gtOper != GT_CNS_INT);
// This arg should either have no persistent side effects or be the last one in our table
// assert(((argx->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) == 0) || (curInx == (argCount-1)));
if (argsRemaining == 1)
{
// This is the last arg to place
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
assert(begTab == endTab);
break;
}
else
{
if (!costsPrepared)
{
/* We call gtPrepareCost to measure the cost of evaluating this tree */
compiler->gtPrepareCost(argx);
}
if (argx->GetCostEx() > expensiveArgCost)
{
// Remember this arg as the most expensive one that we have yet seen
expensiveArgCost = argx->GetCostEx();
expensiveArg = curInx;
expensiveArgTabEntry = curArgTabEntry;
}
}
}
}
noway_assert(expensiveArg != UINT_MAX);
// put the most expensive arg towards the beginning of the table
expensiveArgTabEntry->processed = true;
// place expensiveArgTabEntry at the begTab position by performing a swap
//
if (expensiveArg != begTab)
{
argTable[expensiveArg] = argTable[begTab];
argTable[begTab] = expensiveArgTabEntry;
}
begTab++;
argsRemaining--;
costsPrepared = true; // If we have more expensive arguments, don't re-evaluate the tree cost on the next loop
}
// The table should now be completely filled and thus begTab should now be adjacent to endTab
// and argsRemaining should be zero
assert(begTab == (endTab + 1));
assert(argsRemaining == 0);
argsSorted = true;
}
#ifdef DEBUG
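//------------------------------------------------------------------------
// fgArgInfo::Dump: Dump every entry in the argument table.
//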
void fgArgInfo::Dump(Compiler* compiler) const
{
for (unsigned curInx = 0; curInx < ArgCount(); curInx++)
{
fgArgTabEntry* curArgEntry = ArgTable()[curInx];
curArgEntry->Dump();
}
}
#endif
//------------------------------------------------------------------------------
// fgMakeTmpArgNode : This function creates a tmp var only if needed.
// We need this to be done in order to enforce ordering
// of the evaluation of arguments.
//
// Arguments:
// curArgTabEntry
//
// Return Value:
// the newly created temp var tree.
GenTree* Compiler::fgMakeTmpArgNode(fgArgTabEntry* curArgTabEntry)
{
unsigned tmpVarNum = curArgTabEntry->tmpNum;
LclVarDsc* varDsc = lvaGetDesc(tmpVarNum);
assert(varDsc->lvIsTemp);
var_types type = varDsc->TypeGet();
// Create a copy of the temp to go into the late argument list
GenTree* arg = gtNewLclvNode(tmpVarNum, type);
GenTree* addrNode = nullptr;
if (varTypeIsStruct(type))
{
#if defined(TARGET_AMD64) || defined(TARGET_ARM64) || defined(TARGET_ARM) || defined(TARGET_LOONGARCH64)
// Can this type be passed as a primitive type?
// If so, the following call will return the corresponding primitive type.
// Otherwise, it will return TYP_UNKNOWN and we will pass it as a struct type.
bool passedAsPrimitive = false;
if (curArgTabEntry->TryPassAsPrimitive())
{
CORINFO_CLASS_HANDLE clsHnd = varDsc->GetStructHnd();
var_types structBaseType =
getPrimitiveTypeForStruct(lvaLclExactSize(tmpVarNum), clsHnd, curArgTabEntry->IsVararg());
if (structBaseType != TYP_UNKNOWN)
{
passedAsPrimitive = true;
#if defined(UNIX_AMD64_ABI)
// TODO-Cleanup: This is inelegant, but eventually we'll track this in the fgArgTabEntry,
// and otherwise we'd have to either modify getPrimitiveTypeForStruct() to take
// a structDesc or call eeGetSystemVAmd64PassStructInRegisterDescriptor yet again.
//
if (genIsValidFloatReg(curArgTabEntry->GetRegNum()))
{
if (structBaseType == TYP_INT)
{
structBaseType = TYP_FLOAT;
}
else
{
assert(structBaseType == TYP_LONG);
structBaseType = TYP_DOUBLE;
}
}
#endif
type = structBaseType;
}
}
// If it is passed in registers, don't get the address of the var. Make it a
// field instead. It will be loaded in registers with putarg_reg tree in lower.
if (passedAsPrimitive)
{
arg->ChangeOper(GT_LCL_FLD);
arg->gtType = type;
lvaSetVarDoNotEnregister(tmpVarNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
}
else
{
var_types addrType = TYP_BYREF;
arg = gtNewOperNode(GT_ADDR, addrType, arg);
lvaSetVarAddrExposed(tmpVarNum DEBUGARG(AddressExposedReason::ESCAPE_ADDRESS));
addrNode = arg;
#if FEATURE_MULTIREG_ARGS
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
assert(varTypeIsStruct(type));
if (lvaIsMultiregStruct(varDsc, curArgTabEntry->IsVararg()))
{
// We will create a GT_OBJ for the argument below.
// This will be passed by value in two registers.
assert(addrNode != nullptr);
// Create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
}
#else
// Always create an Obj of the temp to use it as a call argument.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), arg);
#endif // !(TARGET_ARM64 || TARGET_LOONGARCH64)
#endif // FEATURE_MULTIREG_ARGS
}
#else // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM or TARGET_LOONGARCH64)
// On other targets, we pass the struct by value.
assert(varTypeIsStruct(type));
addrNode = gtNewOperNode(GT_ADDR, TYP_BYREF, arg);
// Get a new Obj node temp to use it as a call argument.
// gtNewObjNode will set the GTF_EXCEPT flag if this is not a local stack object.
arg = gtNewObjNode(lvaGetStruct(tmpVarNum), addrNode);
#endif // not (TARGET_AMD64 or TARGET_ARM64 or TARGET_ARM or TARGET_LOONGARCH64)
} // (varTypeIsStruct(type))
if (addrNode != nullptr)
{
assert(addrNode->gtOper == GT_ADDR);
// the child of a GT_ADDR is required to have this flag set
addrNode->AsOp()->gtOp1->gtFlags |= GTF_DONT_CSE;
}
return arg;
}
//------------------------------------------------------------------------------
// EvalArgsToTemps : Create temp assignments and populate the LateArgs list.
void fgArgInfo::EvalArgsToTemps()
{
assert(argsSorted);
unsigned regArgInx = 0;
// Now go through the argument table and perform the necessary evaluation into temps
GenTreeCall::Use* tmpRegArgNext = nullptr;
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
assert(curArgTabEntry->lateUse == nullptr);
GenTree* argx = curArgTabEntry->GetNode();
GenTree* setupArg = nullptr;
GenTree* defArg;
#if !FEATURE_FIXED_OUT_ARGS
// Only ever set for FEATURE_FIXED_OUT_ARGS
assert(curArgTabEntry->needPlace == false);
// On x86 and other archs that use push instructions to pass arguments:
// Only the register arguments need to be replaced with placeholder nodes.
// Stacked arguments are evaluated and pushed (or stored into the stack) in order.
//
if (curArgTabEntry->GetRegNum() == REG_STK)
continue;
#endif
if (curArgTabEntry->needTmp)
{
if (curArgTabEntry->isTmp)
{
// Create a copy of the temp to go into the late argument list
defArg = compiler->fgMakeTmpArgNode(curArgTabEntry);
// mark the original node as a late argument
argx->gtFlags |= GTF_LATE_ARG;
}
else
{
// Create a temp assignment for the argument
// Put the temp in the gtCallLateArgs list
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (compiler->verbose)
{
printf("Argument with 'side effect'...\n");
compiler->gtDispTree(argx);
}
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
noway_assert(argx->gtType != TYP_STRUCT);
#endif
unsigned tmpVarNum = compiler->lvaGrabTemp(true DEBUGARG("argument with side effect"));
if (argx->gtOper == GT_MKREFANY)
{
// For GT_MKREFANY, typically the actual struct copying does
// not have any side-effects and can be delayed. So instead
// of using a temp for the whole struct, we can just use a temp
// for the operand that has a side-effect.
GenTree* operand;
if ((argx->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp1;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp1 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
else if ((argx->AsOp()->gtOp1->gtFlags & GTF_ALL_EFFECT) == 0)
{
operand = argx->AsOp()->gtOp2;
// In the early argument evaluation, place an assignment to the temp
// from the source operand of the mkrefany
setupArg = compiler->gtNewTempAssign(tmpVarNum, operand);
// Replace the operand for the mkrefany with the new temp.
argx->AsOp()->gtOp2 = compiler->gtNewLclvNode(tmpVarNum, operand->TypeGet());
}
}
if (setupArg != nullptr)
{
// Now keep the mkrefany for the late argument list
defArg = argx;
// Clear the side-effect flags because now both op1 and op2 have no side-effects
defArg->gtFlags &= ~GTF_ALL_EFFECT;
}
else
{
setupArg = compiler->gtNewTempAssign(tmpVarNum, argx);
LclVarDsc* varDsc = compiler->lvaGetDesc(tmpVarNum);
var_types lclVarType = genActualType(argx->gtType);
var_types scalarType = TYP_UNKNOWN;
if (setupArg->OperIsCopyBlkOp())
{
setupArg = compiler->fgMorphCopyBlock(setupArg);
#if defined(TARGET_ARMARCH) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
#if defined(TARGET_LOONGARCH64)
// On LoongArch64, "getPrimitiveTypeForStruct" will incorrectly return "TYP_LONG"
// for "struct { float, float }", and retyping to a primitive here will cause the
// multi-reg morphing to not kick in (the struct in question needs to be passed in
// two FP registers).
// TODO-LoongArch64: fix "getPrimitiveTypeForStruct" or use the ABI information in
// the arg entry instead of calling it here.
if ((lclVarType == TYP_STRUCT) && (curArgTabEntry->numRegs == 1))
#else
if (lclVarType == TYP_STRUCT)
#endif
{
// This scalar LclVar widening step is only performed for ARM architectures.
//
CORINFO_CLASS_HANDLE clsHnd = compiler->lvaGetStruct(tmpVarNum);
unsigned structSize = varDsc->lvExactSize;
scalarType =
compiler->getPrimitiveTypeForStruct(structSize, clsHnd, curArgTabEntry->IsVararg());
}
#endif // TARGET_ARMARCH || defined (UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
}
// scalarType can be set to a wider type for ARM or unix amd64 architectures:
// (3 => 4) or (5,6,7 => 8)
if ((scalarType != TYP_UNKNOWN) && (scalarType != lclVarType))
{
// Create a GT_LCL_FLD using the wider type to go to the late argument list
defArg = compiler->gtNewLclFldNode(tmpVarNum, scalarType, 0);
}
else
{
// Create a copy of the temp to go to the late argument list
defArg = compiler->gtNewLclvNode(tmpVarNum, lclVarType);
}
curArgTabEntry->isTmp = true;
curArgTabEntry->tmpNum = tmpVarNum;
#ifdef TARGET_ARM
// Previously we might have thought the local was promoted, and thus the 'COPYBLK'
// might have left holes in the used registers (see
// fgAddSkippedRegsInPromotedStructArg).
// Too bad we're not that smart for these intermediate temps...
if (isValidIntArgReg(curArgTabEntry->GetRegNum()) && (curArgTabEntry->numRegs > 1))
{
regNumber argReg = curArgTabEntry->GetRegNum();
regMaskTP allUsedRegs = genRegMask(curArgTabEntry->GetRegNum());
for (unsigned i = 1; i < curArgTabEntry->numRegs; i++)
{
argReg = genRegArgNext(argReg);
allUsedRegs |= genRegMask(argReg);
}
}
#endif // TARGET_ARM
}
/* mark the assignment as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
printf("\n Evaluate to a temp:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
}
else // curArgTabEntry->needTmp == false
{
// On x86 -
// Only register args are replaced with placeholder nodes
// and the stack based arguments are evaluated and pushed in order.
//
// On Arm/x64 - When needTmp is false and needPlace is false,
// the non-register arguments are evaluated and stored in order.
// When needPlace is true we have a nested call that comes after
// this argument so we have to replace it in the gtCallArgs list
// (the initial argument evaluation list) with a placeholder.
//
if ((curArgTabEntry->GetRegNum() == REG_STK) && (curArgTabEntry->needPlace == false))
{
continue;
}
/* No temp needed - move the whole node to the gtCallLateArgs list */
/* The argument is deferred and put in the late argument list */
defArg = argx;
// Create a placeholder node to put in its place in gtCallLateArgs.
// For a struct type we also need to record the class handle of the arg.
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
// All structs are either passed (and retyped) as integral types, OR they
// are passed by reference.
noway_assert(argx->gtType != TYP_STRUCT);
#else // !defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)
if (defArg->TypeGet() == TYP_STRUCT)
{
clsHnd = compiler->gtGetStructHandleIfPresent(defArg);
noway_assert(clsHnd != NO_CLASS_HANDLE);
}
#endif // !(defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI))
setupArg = compiler->gtNewArgPlaceHolderNode(defArg->gtType, clsHnd);
/* mark the placeholder node as a late argument */
setupArg->gtFlags |= GTF_LATE_ARG;
#ifdef DEBUG
if (compiler->verbose)
{
if (curArgTabEntry->GetRegNum() == REG_STK)
{
printf("Deferred stack argument :\n");
}
else
{
printf("Deferred argument ('%s'):\n", getRegName(curArgTabEntry->GetRegNum()));
}
compiler->gtDispTree(argx);
printf("Replaced with placeholder node:\n");
compiler->gtDispTree(setupArg);
}
#endif
}
if (setupArg != nullptr)
{
noway_assert(curArgTabEntry->use->GetNode() == argx);
curArgTabEntry->use->SetNode(setupArg);
}
/* deferred arg goes into the late argument list */
if (tmpRegArgNext == nullptr)
{
tmpRegArgNext = compiler->gtNewCallArgs(defArg);
callTree->AsCall()->gtCallLateArgs = tmpRegArgNext;
}
else
{
noway_assert(tmpRegArgNext->GetNode() != nullptr);
tmpRegArgNext->SetNext(compiler->gtNewCallArgs(defArg));
tmpRegArgNext = tmpRegArgNext->GetNext();
}
curArgTabEntry->lateUse = tmpRegArgNext;
curArgTabEntry->SetLateArgInx(regArgInx++);
if ((setupArg != nullptr) && setupArg->OperIs(GT_ARGPLACE) && (callTree->gtRetBufArg == curArgTabEntry->use))
{
callTree->SetLclRetBufArg(tmpRegArgNext);
}
}
#ifdef DEBUG
if (compiler->verbose)
{
printf("\nShuffled argument table: ");
for (unsigned curInx = 0; curInx < argCount; curInx++)
{
fgArgTabEntry* curArgTabEntry = argTable[curInx];
if (curArgTabEntry->GetRegNum() != REG_STK)
{
printf("%s ", getRegName(curArgTabEntry->GetRegNum()));
}
}
printf("\n");
}
#endif
}
//------------------------------------------------------------------------------
// fgMakeMultiUse : If the node is an unaliased local or constant clone it,
// otherwise insert a comma form temp
//
// Arguments:
// ppTree - a pointer to the child node we will be replacing with the comma expression that
// evaluates ppTree to a temp and returns the result
//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
// Notes:
// Caller must ensure that if the node is an unaliased local, the second use this
// creates will be evaluated before the local can be reassigned.
//
// Can be safely called in morph preorder, before GTF_GLOB_REF is reliable.
//
GenTree* Compiler::fgMakeMultiUse(GenTree** pOp)
{
GenTree* const tree = *pOp;
if (tree->IsInvariant())
{
return gtClone(tree);
}
else if (tree->IsLocal())
{
// Can't rely on GTF_GLOB_REF here.
//
if (!lvaGetDesc(tree->AsLclVarCommon())->IsAddressExposed())
{
return gtClone(tree);
}
}
return fgInsertCommaFormTemp(pOp);
}
//------------------------------------------------------------------------------
// fgInsertCommaFormTemp: Create a new temporary variable to hold the result of *ppTree,
// and replace *ppTree with comma(asg(newLcl, *ppTree), newLcl)
//
// Arguments:
// ppTree - a pointer to the child node we will be replacing with the comma expression that
// evaluates ppTree to a temp and returns the result
//
// structType - value type handle if the temp created is of TYP_STRUCT.
//
// Return Value:
// A fresh GT_LCL_VAR node referencing the temp which has not been used
//
GenTree* Compiler::fgInsertCommaFormTemp(GenTree** ppTree, CORINFO_CLASS_HANDLE structType /*= nullptr*/)
{
GenTree* subTree = *ppTree;
unsigned lclNum = lvaGrabTemp(true DEBUGARG("fgInsertCommaFormTemp is creating a new local variable"));
if (varTypeIsStruct(subTree))
{
assert(structType != nullptr);
lvaSetStruct(lclNum, structType, false);
}
// If subTree->TypeGet() == TYP_STRUCT, gtNewTempAssign() will create a GT_COPYBLK tree.
// The type of GT_COPYBLK is TYP_VOID. Therefore, we should use subTree->TypeGet() for
// setting type of lcl vars created.
GenTree* asg = gtNewTempAssign(lclNum, subTree);
GenTree* load = new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
GenTree* comma = gtNewOperNode(GT_COMMA, subTree->TypeGet(), asg, load);
*ppTree = comma;
return new (this, GT_LCL_VAR) GenTreeLclVar(GT_LCL_VAR, subTree->TypeGet(), lclNum);
}
//------------------------------------------------------------------------
// fgInitArgInfo: Construct the fgArgInfo for the call with the fgArgEntry for each arg
//
// Arguments:
// callNode - the call for which we are generating the fgArgInfo
//
// Return Value:
// None
//
// Notes:
// This method is idempotent in that it checks whether the fgArgInfo has already been
// constructed, and just returns.
// This method only computes the arg table and arg entries for the call (the fgArgInfo),
// and makes no modification of the args themselves.
//
// The IR for the call args can change for calls with non-standard arguments: some non-standard
// arguments add new call argument IR nodes.
//
void Compiler::fgInitArgInfo(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
unsigned argIndex = 0;
unsigned intArgRegNum = 0;
unsigned fltArgRegNum = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool callHasRetBuffArg = call->HasRetBufArg();
bool callIsVararg = call->IsVarargs();
#ifdef TARGET_ARM
regMaskTP argSkippedRegMask = RBM_NONE;
regMaskTP fltArgSkippedRegMask = RBM_NONE;
#endif // TARGET_ARM
#if defined(TARGET_X86)
unsigned maxRegArgs = MAX_REG_ARG; // X86: non-const, must be calculated
#else
const unsigned maxRegArgs = MAX_REG_ARG; // other arch: fixed constant number
#endif
if (call->fgArgInfo != nullptr)
{
// We've already initialized and set the fgArgInfo.
return;
}
JITDUMP("Initializing arg info for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
// At this point, we should never have gtCallLateArgs, as this needs to be done before those are determined.
assert(call->gtCallLateArgs == nullptr);
if (TargetOS::IsUnix && callIsVararg)
{
// Currently native varargs is not implemented on non-Windows targets.
//
// Note that some targets like Arm64 Unix should not need much work as
// the ABI is the same. While other targets may only need small changes
// such as amd64 Unix, which just expects RAX to pass numFPArguments.
NYI("Morphing Vararg call not yet implemented on non Windows targets.");
}
// Data structure for keeping track of non-standard args. Non-standard args are those that are not passed
// following the normal calling convention or in the normal argument registers. We either mark existing
// arguments as non-standard (such as the x8 return buffer register on ARM64), or we manually insert the
// non-standard arguments into the argument list, below.
class NonStandardArgs
{
struct NonStandardArg
{
GenTree* node; // The tree node representing this non-standard argument.
// Note that this must be updated if the tree node changes due to morphing!
regNumber reg; // The register to be assigned to this non-standard argument.
NonStandardArgKind kind; // The kind of the non-standard arg
};
ArrayStack<NonStandardArg> args;
public:
NonStandardArgs(CompAllocator alloc) : args(alloc, 3) // We will have at most 3 non-standard arguments
{
}
//-----------------------------------------------------------------------------
// Add: add a non-standard argument to the table of non-standard arguments
//
// Arguments:
// node - a GenTree node that has a non-standard argument.
// reg - the register to assign to this node.
//
// Return Value:
// None.
//
void Add(GenTree* node, regNumber reg, NonStandardArgKind kind)
{
NonStandardArg nsa = {node, reg, kind};
args.Push(nsa);
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree* in the set of non-standard args.
//
// Arguments:
// node - a GenTree node to look for
//
// Return Value:
// The index of the non-standard argument (a non-negative, unique, stable number).
// If the node is not a non-standard argument, return -1.
//
int Find(GenTree* node)
{
for (int i = 0; i < args.Height(); i++)
{
if (node == args.Top(i).node)
{
return i;
}
}
return -1;
}
//-----------------------------------------------------------------------------
// Find: Look for a GenTree node in the non-standard arguments set. If found,
// set the register to use for the node.
//
// Arguments:
// node - a GenTree node to look for
// pReg - an OUT argument. *pReg is set to the non-standard register to use if
// 'node' is found in the non-standard argument set.
// pKind - an OUT argument. *pKind is set to the kind of the non-standard arg.
//
// Return Value:
// 'true' if 'node' is a non-standard argument. In this case, *pReg and *pKind are set.
// 'false' otherwise (in this case, *pReg and *pKind are unmodified).
//
bool Find(GenTree* node, regNumber* pReg, NonStandardArgKind* pKind)
{
for (int i = 0; i < args.Height(); i++)
{
NonStandardArg& nsa = args.TopRef(i);
if (node == nsa.node)
{
*pReg = nsa.reg;
*pKind = nsa.kind;
return true;
}
}
return false;
}
//-----------------------------------------------------------------------------
// Replace: Replace the non-standard argument node at a given index. This is done when
// the original node was replaced via morphing, but we need to continue to assign a
// particular non-standard arg to it.
//
// Arguments:
// index - the index of the non-standard arg. It must exist.
// node - the new GenTree node.
//
// Return Value:
// None.
//
void Replace(int index, GenTree* node)
{
args.TopRef(index).node = node;
}
} nonStandardArgs(getAllocator(CMK_ArrayStack));
// Count of args. On first morph, this is counted before we've filled in the arg table.
// On remorph, we grab it from the arg table.
unsigned numArgs = 0;
// First we need to count the args
if (call->gtCallThisArg != nullptr)
{
numArgs++;
}
for (GenTreeCall::Use& use : call->Args())
{
numArgs++;
}
// Insert or mark non-standard args. These are either outside the normal calling convention, or
// arguments registers that don't follow the normal progression of argument registers in the calling
// convention (such as for the ARM64 fixed return buffer argument x8).
//
// *********** NOTE *************
// The logic here must remain in sync with GetNonStandardAddedArgCount(), which is used to map arguments
// in the implementation of fast tail call.
// *********** END NOTE *********
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_X86) || defined(TARGET_ARM)
// The x86 and arm32 CORINFO_HELP_INIT_PINVOKE_FRAME helper has a custom calling convention.
// Set the argument registers correctly here.
if (call->IsHelperCall(this, CORINFO_HELP_INIT_PINVOKE_FRAME))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_PINVOKE_FRAME, NonStandardArgKind::PInvokeFrame);
}
#endif // defined(TARGET_X86) || defined(TARGET_ARM)
#if defined(TARGET_ARM)
// A non-standard calling convention using wrapper delegate invoke is used on ARM, only, for wrapper
// delegates. It is used for VSD delegate calls where the VSD custom calling convention ABI requires passing
// R4, a callee-saved register, with a special value. Since R4 is a callee-saved register, its value needs
// to be preserved. Thus, the VM uses a wrapper delegate IL stub, which preserves R4 and also sets up R4
// correctly for the VSD call. The VM is simply reusing an existing mechanism (wrapper delegate IL stub)
// to achieve its goal for delegate VSD call. See COMDelegate::NeedsWrapperDelegate() in the VM for details.
else if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
GenTree* arg = call->gtCallThisArg->GetNode();
if (arg->OperIsLocal())
{
arg = gtClone(arg, true);
}
else
{
GenTree* tmp = fgInsertCommaFormTemp(&arg);
call->gtCallThisArg->SetNode(arg);
call->gtFlags |= GTF_ASG;
arg = tmp;
}
noway_assert(arg != nullptr);
GenTree* newArg = new (this, GT_ADDR)
GenTreeAddrMode(TYP_BYREF, arg, nullptr, 0, eeGetEEInfo()->offsetOfWrapperDelegateIndirectCell);
// Append newArg as the last arg
GenTreeCall::Use** insertionPoint = &call->gtCallArgs;
for (; *insertionPoint != nullptr; insertionPoint = &((*insertionPoint)->NextRef()))
{
}
*insertionPoint = gtNewCallArgs(newArg);
numArgs++;
nonStandardArgs.Add(newArg, virtualStubParamInfo->GetReg(), NonStandardArgKind::WrapperDelegateCell);
}
#endif // defined(TARGET_ARM)
#if defined(TARGET_X86)
// The x86 shift helpers have custom calling conventions and expect the lo part of the long to be in EAX and the
// hi part to be in EDX. This sets the argument registers up correctly.
else if (call->IsHelperCall(this, CORINFO_HELP_LLSH) || call->IsHelperCall(this, CORINFO_HELP_LRSH) ||
call->IsHelperCall(this, CORINFO_HELP_LRSZ))
{
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* arg1 = args->GetNode();
assert(arg1 != nullptr);
nonStandardArgs.Add(arg1, REG_LNGARG_LO, NonStandardArgKind::ShiftLow);
args = args->GetNext();
GenTree* arg2 = args->GetNode();
assert(arg2 != nullptr);
nonStandardArgs.Add(arg2, REG_LNGARG_HI, NonStandardArgKind::ShiftHigh);
}
#else // !TARGET_X86
// TODO-X86-CQ: Currently RyuJIT/x86 passes args on the stack, so this is not needed.
// If/when we change that, the following code needs to be changed to correctly support the (TBD) managed calling
// convention for x86/SSE.
// If we have a Fixed Return Buffer argument register then we setup a non-standard argument for it.
//
// We don't use the fixed return buffer argument if we have the special unmanaged instance call convention.
// That convention doesn't use the fixed return buffer register.
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->HasFixedRetBufArg())
{
args = call->gtCallArgs;
assert(args != nullptr);
argx = call->gtCallArgs->GetNode();
// We don't increment numArgs here, since we already counted this argument above.
nonStandardArgs.Add(argx, theFixedRetBuffReg(), NonStandardArgKind::FixedRetBuffer);
}
// We are allowed to have a Fixed Return Buffer argument combined
// with any of the remaining non-standard arguments
//
CLANG_FORMAT_COMMENT_ANCHOR;
if (call->IsVirtualStub())
{
if (!call->IsTailCallViaJitHelper())
{
GenTree* stubAddrArg = fgGetStubAddrArg(call);
// And push the stub address onto the list of arguments
call->gtCallArgs = gtPrependNewCallArg(stubAddrArg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(stubAddrArg, stubAddrArg->GetRegNum(), NonStandardArgKind::VirtualStubCell);
}
else
{
// If it is a VSD call getting dispatched via tail call helper,
// fgMorphTailCallViaJitHelper() would materialize stub addr as an additional
// parameter added to the original arg list and hence no need to
// add as a non-standard arg.
}
}
else
#endif // !TARGET_X86
if (call->gtCallType == CT_INDIRECT && (call->gtCallCookie != nullptr))
{
assert(!call->IsUnmanaged());
GenTree* arg = call->gtCallCookie;
noway_assert(arg != nullptr);
call->gtCallCookie = nullptr;
// All architectures pass the cookie in a register.
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
nonStandardArgs.Add(arg, REG_PINVOKE_COOKIE_PARAM, NonStandardArgKind::PInvokeCookie);
numArgs++;
// put destination into R10/EAX
arg = gtClone(call->gtCallAddr, true);
call->gtCallArgs = gtPrependNewCallArg(arg, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(arg, REG_PINVOKE_TARGET_PARAM, NonStandardArgKind::PInvokeTarget);
// finally change this call to a helper call
call->gtCallType = CT_HELPER;
call->gtCallMethHnd = eeFindHelper(CORINFO_HELP_PINVOKE_CALLI);
}
#if defined(FEATURE_READYTORUN)
// For arm/arm64, we dispatch code same as VSD using virtualStubParamInfo->GetReg()
// for indirection cell address, which ZapIndirectHelperThunk expects.
// For x64/x86 we use return address to get the indirection cell by disassembling the call site.
// That is not possible for fast tailcalls, so we only need this logic for fast tailcalls on xarch.
// Note that we call this before we know if something will be a fast tailcall or not.
// That's ok; after making something a tailcall, we will invalidate this information
// and reconstruct it if necessary. The tailcalling decision does not change since
// this is a non-standard arg in a register.
bool needsIndirectionCell = call->IsR2RRelativeIndir() && !call->IsDelegateInvoke();
#if defined(TARGET_XARCH)
needsIndirectionCell &= call->IsFastTailCall();
#endif
if (needsIndirectionCell)
{
assert(call->gtEntryPoint.addr != nullptr);
size_t addrValue = (size_t)call->gtEntryPoint.addr;
GenTree* indirectCellAddress = gtNewIconHandleNode(addrValue, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
indirectCellAddress->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
indirectCellAddress->SetRegNum(REG_R2R_INDIRECT_PARAM);
#ifdef TARGET_ARM
// Issue #xxxx : Don't attempt to CSE this constant on ARM32
//
// This constant has specific register requirements, and LSRA doesn't currently correctly
// handle them when the value is in a CSE'd local.
indirectCellAddress->SetDoNotCSE();
#endif // TARGET_ARM
// Push the stub address onto the list of arguments.
call->gtCallArgs = gtPrependNewCallArg(indirectCellAddress, call->gtCallArgs);
numArgs++;
nonStandardArgs.Add(indirectCellAddress, indirectCellAddress->GetRegNum(),
NonStandardArgKind::R2RIndirectionCell);
}
#endif
if ((REG_VALIDATE_INDIRECT_CALL_ADDR != REG_ARG_0) && call->IsHelperCall(this, CORINFO_HELP_VALIDATE_INDIRECT_CALL))
{
assert(call->gtCallArgs != nullptr);
GenTreeCall::Use* args = call->gtCallArgs;
GenTree* tar = args->GetNode();
nonStandardArgs.Add(tar, REG_VALIDATE_INDIRECT_CALL_ADDR, NonStandardArgKind::ValidateIndirectCallTarget);
}
// Allocate the fgArgInfo for the call node;
//
call->fgArgInfo = new (this, CMK_Unknown) fgArgInfo(this, call, numArgs);
// Add the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
assert(argIndex == 0);
assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_INDIRECT);
assert(varTypeIsGC(argx) || (argx->gtType == TYP_I_IMPL));
const regNumber regNum = genMapIntRegArgNumToRegNum(intArgRegNum);
const unsigned numRegs = 1;
const unsigned byteSize = TARGET_POINTER_SIZE;
const unsigned byteAlignment = TARGET_POINTER_SIZE;
const bool isStruct = false;
const bool isFloatHfa = false;
// This is a register argument - put it in the table.
call->fgArgInfo->AddRegArg(argIndex, argx, call->gtCallThisArg, regNum, numRegs, byteSize, byteAlignment,
isStruct, isFloatHfa,
callIsVararg UNIX_AMD64_ABI_ONLY_ARG(REG_STK) UNIX_AMD64_ABI_ONLY_ARG(0)
UNIX_AMD64_ABI_ONLY_ARG(0) UNIX_AMD64_ABI_ONLY_ARG(nullptr));
intArgRegNum++;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass an integer register argument
// we skip the corresponding floating point register argument
fltArgRegNum++;
#endif // WINDOWS_AMD64_ABI
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
#ifdef TARGET_X86
// Compute the maximum number of arguments that can be passed in registers.
// For X86 we handle the varargs and unmanaged calling conventions
#ifndef UNIX_X86_ABI
if (call->gtFlags & GTF_CALL_POP_ARGS)
{
noway_assert(intArgRegNum < MAX_REG_ARG);
// No more register arguments for varargs (CALL_POP_ARGS)
maxRegArgs = intArgRegNum;
// Add in the ret buff arg
if (callHasRetBuffArg)
maxRegArgs++;
}
#endif // UNIX_X86_ABI
if (call->IsUnmanaged())
{
noway_assert(intArgRegNum == 0);
if (call->gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
noway_assert(call->gtCallArgs->GetNode()->TypeGet() == TYP_I_IMPL ||
call->gtCallArgs->GetNode()->TypeGet() == TYP_BYREF ||
call->gtCallArgs->GetNode()->gtOper ==
GT_NOP); // the arg was already morphed to a register (fgMorph called twice)
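// Unmanaged 'thiscall' passes only the 'this' pointer in a register (ECX on x86),
// so at most one argument register is available.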
maxRegArgs = 1;
}
else
{
maxRegArgs = 0;
}
#ifdef UNIX_X86_ABI
// Add in the ret buff arg
if (callHasRetBuffArg &&
call->unmgdCallConv != CorInfoCallConvExtension::C && // C and Stdcall calling conventions do not
call->unmgdCallConv != CorInfoCallConvExtension::Stdcall) // use registers to pass arguments.
maxRegArgs++;
#endif
}
#endif // TARGET_X86
/* Morph the user arguments */
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_ARM)
// The ARM ABI has a concept of back-filling of floating-point argument registers, according
// to the "Procedure Call Standard for the ARM Architecture" document, especially
// section 6.1.2.3 "Parameter passing". Back-filling is where floating-point argument N+1 can
// appear in a lower-numbered register than floating point argument N. That is, argument
// register allocation is not strictly increasing. To support this, we need to keep track of unused
// floating-point argument registers that we can back-fill. We only support 4-byte float and
// 8-byte double types, and one to four element HFAs composed of these types. With this, we will
// only back-fill single registers, since there is no way with these types to create
// an alignment hole greater than one register. However, there can be up to 3 back-fill slots
// available (with 16 FP argument registers). Consider this code:
//
// struct HFA { float x, y, z; }; // a three element HFA
// void bar(float a1, // passed in f0
// double a2, // passed in f2/f3; skip f1 for alignment
// HFA a3, // passed in f4/f5/f6
// double a4, // passed in f8/f9; skip f7 for alignment. NOTE: it doesn't fit in the f1 back-fill slot
// HFA a5, // passed in f10/f11/f12
// double a6, // passed in f14/f15; skip f13 for alignment. NOTE: it doesn't fit in the f1 or f7 back-fill
// // slots
// float a7, // passed in f1 (back-filled)
// float a8, // passed in f7 (back-filled)
// float a9, // passed in f13 (back-filled)
// float a10) // passed on the stack in [OutArg+0]
//
// Note that if we ever support FP types with larger alignment requirements, then there could
// be more than single register back-fills.
//
// Once we assign a floating-point argument to the stack, all subsequent ones must also go on the stack.
// See "Procedure Call Standard for the ARM Architecture", section 6.1.2.3, "The back-filling
// continues only so long as no VFP CPRC has been allocated to a slot on the stack."
// We set anyFloatStackArgs to true when a floating-point argument has been assigned to the stack
// and prevent any additional floating-point arguments from going in registers.
bool anyFloatStackArgs = false;
#endif // TARGET_ARM
#ifdef UNIX_AMD64_ABI
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // UNIX_AMD64_ABI
#if defined(DEBUG)
// Check that we have valid information about call's argument types.
// For example:
// load byte; call(int) -> CALL(PUTARG_TYPE byte(IND byte));
// load int; call(byte) -> CALL(PUTARG_TYPE int (IND int));
// etc.
if (call->callSig != nullptr)
{
CORINFO_SIG_INFO* sig = call->callSig;
const unsigned sigArgsCount = sig->numArgs;
GenTreeCall::Use* nodeArgs = call->gtCallArgs;
// It could include many arguments not included in `sig->numArgs`, for example, `this`, runtime lookup, cookie
// etc.
unsigned nodeArgsCount = 0;
call->VisitOperands([&nodeArgsCount](GenTree* operand) -> GenTree::VisitResult {
nodeArgsCount++;
return GenTree::VisitResult::Continue;
});
if (call->gtCallThisArg != nullptr)
{
// Handle the most common argument that is not included in `sig->numArgs`,
// so that the following check works on more methods.
nodeArgsCount--;
}
assert(nodeArgsCount >= sigArgsCount);
if ((nodeArgsCount == sigArgsCount) &&
((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1)))
{
CORINFO_ARG_LIST_HANDLE sigArg = sig->args;
for (unsigned i = 0; i < sig->numArgs; ++i)
{
CORINFO_CLASS_HANDLE argClass;
const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass));
const var_types sigType = JITtype2varType(corType);
assert(nodeArgs != nullptr);
const GenTree* nodeArg = nodeArgs->GetNode();
assert(nodeArg != nullptr);
const var_types nodeType = nodeArg->TypeGet();
assert((nodeType == sigType) || varTypeIsStruct(sigType) ||
genTypeSize(nodeType) == genTypeSize(sigType));
sigArg = info.compCompHnd->getArgNext(sigArg);
nodeArgs = nodeArgs->GetNext();
}
assert(nodeArgs == nullptr);
}
}
#endif // DEBUG
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
argx = args->GetNode()->gtSkipPutArgType();
// Change the node to TYP_I_IMPL so we don't report GC info
// NOTE: We deferred this from the importer because of the inliner.
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// We should never have any ArgPlaceHolder nodes at this point.
assert(!argx->IsArgPlaceHolderNode());
// Setup any HFA information about 'argx'
bool isHfaArg = false;
var_types hfaType = TYP_UNDEF;
unsigned hfaSlots = 0;
bool passUsingFloatRegs;
unsigned argAlignBytes = TARGET_POINTER_SIZE;
unsigned size = 0;
unsigned byteSize = 0;
if (GlobalJitOptions::compFeatureHfa)
{
hfaType = GetHfaType(argx);
isHfaArg = varTypeIsValidHfaType(hfaType);
#if defined(TARGET_ARM64)
if (TargetOS::IsWindows)
{
// Make sure for vararg methods isHfaArg is not true.
isHfaArg = callIsVararg ? false : isHfaArg;
}
#endif // defined(TARGET_ARM64)
if (isHfaArg)
{
isHfaArg = true;
hfaSlots = GetHfaCount(argx);
// If we have a HFA struct it's possible we transition from a method that originally
// only had integer types to now start having FP types. We have to communicate this
// through this flag since LSRA later on will use this flag to determine whether
// or not to track the FP register set.
//
compFloatingPointUsed = true;
}
}
const bool isFloatHfa = (hfaType == TYP_FLOAT);
#ifdef TARGET_ARM
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx)) && !opts.compUseSoftFP;
bool passUsingIntRegs = passUsingFloatRegs ? false : (intArgRegNum < MAX_REG_ARG);
// We don't use the "size" return value from InferOpSizeAlign().
codeGen->InferOpSizeAlign(argx, &argAlignBytes);
argAlignBytes = roundUp(argAlignBytes, TARGET_POINTER_SIZE);
if (argAlignBytes == 2 * TARGET_POINTER_SIZE)
{
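// 8-byte aligned arguments (e.g. long/double) must start in an even-numbered argument
// register (or at an 8-byte aligned stack offset), so skip the odd register if necessary.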
if (passUsingFloatRegs)
{
if (fltArgRegNum % 2 == 1)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
fltArgRegNum++;
}
}
else if (passUsingIntRegs)
{
if (intArgRegNum % 2 == 1)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
intArgRegNum++;
}
}
#if defined(DEBUG)
if (argSlots % 2 == 1)
{
argSlots++;
}
#endif
}
#elif defined(TARGET_ARM64)
assert(!callIsVararg || !isHfaArg);
passUsingFloatRegs = !callIsVararg && (isHfaArg || varTypeUsesFloatReg(argx));
#elif defined(TARGET_AMD64)
passUsingFloatRegs = varTypeIsFloating(argx);
#elif defined(TARGET_X86)
passUsingFloatRegs = false;
#elif defined(TARGET_LOONGARCH64)
assert(!callIsVararg && !isHfaArg);
passUsingFloatRegs = varTypeUsesFloatReg(argx);
DWORD floatFieldFlags = STRUCT_NO_FLOAT_FIELD;
#else
#error Unsupported or unset target architecture
#endif // TARGET*
bool isBackFilled = false;
unsigned nextFltArgRegNum = fltArgRegNum; // This is the next floating-point argument register number to use
var_types structBaseType = TYP_STRUCT;
unsigned structSize = 0;
bool passStructByRef = false;
bool isStructArg;
GenTree* actualArg = argx->gtEffectiveVal(true /* Commas only */);
//
// Figure out the size of the argument. This is either in number of registers, or number of
// TARGET_POINTER_SIZE stack slots, or the sum of these if the argument is split between the registers and
// the stack.
//
isStructArg = varTypeIsStruct(argx);
CORINFO_CLASS_HANDLE objClass = NO_CLASS_HANDLE;
if (isStructArg)
{
objClass = gtGetStructHandle(argx);
if (argx->TypeGet() == TYP_STRUCT)
{
// For TYP_STRUCT arguments we must have an OBJ, LCL_VAR or MKREFANY
switch (actualArg->OperGet())
{
case GT_OBJ:
structSize = actualArg->AsObj()->GetLayout()->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
break;
case GT_LCL_VAR:
structSize = lvaGetDesc(actualArg->AsLclVarCommon())->lvExactSize;
break;
case GT_MKREFANY:
structSize = info.compCompHnd->getClassSize(objClass);
break;
default:
BADCODE("illegal argument tree in fgInitArgInfo");
break;
}
}
else
{
structSize = genTypeSize(argx);
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
}
#if defined(TARGET_AMD64)
#ifdef UNIX_AMD64_ABI
if (!isStructArg)
{
size = 1; // On AMD64, all primitives fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
else
{
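// Round the struct size up to a whole number of pointer-sized slots;
// e.g. a 12-byte struct occupies two 8-byte slots on this target.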
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
eeGetSystemVAmd64PassStructInRegisterDescriptor(objClass, &structDesc);
}
#else // !UNIX_AMD64_ABI
size = 1; // On AMD64 Windows, all args fit in a single (64-bit) 'slot'
if (!isStructArg)
{
byteSize = genTypeSize(argx);
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
if (isStructArg)
{
if (isHfaArg)
{
// HFA structs are passed by value in multiple registers.
// The "size" in registers may differ the size in pointer-sized units.
CORINFO_CLASS_HANDLE structHnd = gtGetStructHandle(argx);
size = GetHfaCount(structHnd);
byteSize = info.compCompHnd->getClassSize(structHnd);
}
else
{
// Structs are either passed in 1 or 2 (64-bit) slots.
// Structs that are the size of 2 pointers are passed by value in multiple registers,
// if sufficient registers are available.
// Structs that are larger than 2 pointers (except for HFAs) are passed by
// reference (to a copy)
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
if (size > 2)
{
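// Passed by reference: the argument becomes a single pointer to a copy, so it occupies one slot.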
size = 1;
}
}
// Note that there are some additional rules for multireg structs.
// (i.e., they cannot be split between registers and the stack)
}
else
{
size = 1; // Otherwise, all primitive types fit in a single (64-bit) 'slot'
byteSize = genTypeSize(argx);
}
#elif defined(TARGET_ARM) || defined(TARGET_X86)
if (isStructArg)
{
size = (unsigned)(roundUp(structSize, TARGET_POINTER_SIZE)) / TARGET_POINTER_SIZE;
byteSize = structSize;
}
else
{
// The typical case.
// Long/double type argument(s) will be modified as needed in Lowering.
size = genTypeStSz(argx->gtType);
byteSize = genTypeSize(argx);
}
#else
#error Unsupported or unset target architecture
#endif // TARGET_XXX
if (isStructArg)
{
assert(argx == args->GetNode());
assert(structSize != 0);
structPassingKind howToPassStruct;
structBaseType = getArgTypeForStruct(objClass, &howToPassStruct, callIsVararg, structSize);
passStructByRef = (howToPassStruct == SPK_ByReference);
#if defined(TARGET_LOONGARCH64)
if (!passStructByRef)
{
assert((howToPassStruct == SPK_ByValue) || (howToPassStruct == SPK_PrimitiveType));
floatFieldFlags = info.compCompHnd->getLoongArch64PassStructInRegisterFlags(objClass);
passUsingFloatRegs = (floatFieldFlags & STRUCT_HAS_FLOAT_FIELDS_MASK) ? true : false;
compFloatingPointUsed |= passUsingFloatRegs;
if ((floatFieldFlags & (STRUCT_HAS_FLOAT_FIELDS_MASK ^ STRUCT_FLOAT_FIELD_ONLY_ONE)) != 0)
{
// On LoongArch64, "getPrimitiveTypeForStruct" will incorrectly return "TYP_LONG"
// for "struct { float, float }", and retyping to a primitive here will cause the
// multi-reg morphing to not kick in (the struct in question needs to be passed in
// two FP registers). Here we just keep "structBaseType" as "TYP_STRUCT".
// TODO-LoongArch64: fix "getPrimitiveTypeForStruct" or use the ABI information in
// the arg entry instead of calling it here.
structBaseType = TYP_STRUCT;
}
if ((floatFieldFlags & (STRUCT_HAS_FLOAT_FIELDS_MASK ^ STRUCT_FLOAT_FIELD_ONLY_TWO)) != 0)
{
size = 1;
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
{
size = 2;
}
}
else // if (passStructByRef)
{
size = 1;
byteSize = TARGET_POINTER_SIZE;
}
#else
if (howToPassStruct == SPK_ByReference)
{
byteSize = TARGET_POINTER_SIZE;
}
else
{
byteSize = structSize;
}
if (howToPassStruct == SPK_PrimitiveType)
{
#ifdef TARGET_ARM
// TODO-CQ: getArgTypeForStruct should *not* return TYP_DOUBLE for a double struct,
// or for a struct of two floats. This causes the struct to be address-taken.
if (structBaseType == TYP_DOUBLE)
{
size = 2;
}
else
#endif // TARGET_ARM
{
size = 1;
}
}
else if (passStructByRef)
{
size = 1;
}
#endif
}
const var_types argType = args->GetNode()->TypeGet();
if (args->GetNode()->OperIs(GT_PUTARG_TYPE))
{
byteSize = genTypeSize(argType);
}
// The 'size' value must have been set by now. (The original value of zero is an invalid value.)
assert(size != 0);
assert(byteSize != 0);
if (compMacOsArm64Abi())
{
// Arm64 Apple has a special ABI for passing small size arguments on stack,
// bytes are aligned to 1-byte, shorts to 2-byte, int/float to 4-byte, etc.
// It means passing 8 1-byte arguments on stack can take as small as 8 bytes.
argAlignBytes = eeGetArgSizeAlignment(argType, isFloatHfa);
}
#ifdef TARGET_LOONGARCH64
regNumber nextOtherRegNum = REG_STK;
#endif
//
// Figure out if the argument will be passed in a register.
//
bool isRegArg = false;
NonStandardArgKind nonStandardArgKind = NonStandardArgKind::None;
regNumber nonStdRegNum = REG_NA;
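// First check whether the argument's (actual) type is eligible to be passed in a register at all;
// the target-specific logic below then checks whether enough argument registers remain.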
if (isRegParamType(genActualType(argx->TypeGet()))
#ifdef UNIX_AMD64_ABI
&& (!isStructArg || structDesc.passedInRegisters)
#elif defined(TARGET_X86)
|| (isStructArg && isTrivialPointerSizedStruct(objClass))
#endif
)
{
#ifdef TARGET_ARM
if (passUsingFloatRegs)
{
// First, see if it can be back-filled
if (!anyFloatStackArgs && // Is it legal to back-fill? (We haven't put any FP args on the stack yet)
(fltArgSkippedRegMask != RBM_NONE) && // Is there an available back-fill slot?
(size == 1)) // The size to back-fill is one float register
{
// Back-fill the register.
isBackFilled = true;
regMaskTP backFillBitMask = genFindLowestBit(fltArgSkippedRegMask);
fltArgSkippedRegMask &=
~backFillBitMask; // Remove the back-filled register(s) from the skipped mask
nextFltArgRegNum = genMapFloatRegNumToRegArgNum(genRegNumFromMask(backFillBitMask));
assert(nextFltArgRegNum < MAX_FLOAT_REG_ARG);
}
// Does the entire float, double, or HFA fit in the FP arg registers?
// Check if the last register needed is still in the argument register range.
isRegArg = (nextFltArgRegNum + size - 1) < MAX_FLOAT_REG_ARG;
if (!isRegArg)
{
anyFloatStackArgs = true;
}
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
#elif defined(TARGET_ARM64)
if (passUsingFloatRegs)
{
// Check if the last register needed is still in the fp argument register range.
isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG;
// Do we have a HFA arg that we wanted to pass in registers, but we ran out of FP registers?
if (isHfaArg && !isRegArg)
{
// recompute the 'size' so that it represents the number of stack slots rather than the number of
// registers
//
unsigned roundupSize = (unsigned)roundUp(structSize, TARGET_POINTER_SIZE);
size = roundupSize / TARGET_POINTER_SIZE;
// We also must update fltArgRegNum so that we no longer try to
// allocate any new floating point registers for args
// This prevents us from backfilling a subsequent arg into d7
//
fltArgRegNum = MAX_FLOAT_REG_ARG;
}
}
else
{
// Check if the last register needed is still in the int argument register range.
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
// Did we run out of registers when we had a 16-byte struct (size == 2)?
// (i.e we only have one register remaining but we needed two registers to pass this arg)
// This prevents us from backfilling a subsequent arg into x7
//
if (!isRegArg && (size > 1))
{
// Arm64 windows native varargs allows splitting a 16 byte struct between stack
// and the last general purpose register.
if (TargetOS::IsWindows && callIsVararg)
{
// Override the decision and force a split.
isRegArg = (intArgRegNum + (size - 1)) <= maxRegArgs;
}
else
{
// We also must update intArgRegNum so that we no longer try to
// allocate any new general purpose registers for args
//
intArgRegNum = maxRegArgs;
}
}
}
#elif defined(TARGET_LOONGARCH64)
if (passUsingFloatRegs)
{
// Check if the last register needed is still in the fp argument register range.
passUsingFloatRegs = isRegArg = (nextFltArgRegNum + (size - 1)) < MAX_FLOAT_REG_ARG;
if (isStructArg)
{
if ((floatFieldFlags & (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_SECOND)) &&
passUsingFloatRegs)
{
passUsingFloatRegs = isRegArg = intArgRegNum < maxRegArgs;
}
if (!passUsingFloatRegs)
{
size = structSize > 8 ? 2 : 1;
floatFieldFlags = 0;
}
else if (passUsingFloatRegs)
{
if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
{
nextOtherRegNum = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + 1);
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_SECOND) != 0)
{
assert(size == 1);
size = 2;
passUsingFloatRegs = false;
nextOtherRegNum = genMapFloatRegArgNumToRegNum(nextFltArgRegNum);
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_FIRST) != 0)
{
assert(size == 1);
size = 2;
nextOtherRegNum = genMapIntRegArgNumToRegNum(intArgRegNum);
}
}
}
assert(!isHfaArg); // LoongArch64 does not support HFA.
}
// if we run out of floating-point argument registers, try the int argument registers.
if (!isRegArg)
{
// Check if the last register needed is still in the int argument register range.
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
if (!passUsingFloatRegs && isRegArg && (size > 1))
{
nextOtherRegNum = genMapIntRegArgNumToRegNum(intArgRegNum + 1);
}
// Did we run out of registers when we had a 16-byte struct (size == 2)?
// (i.e we only have one register remaining but we needed two registers to pass this arg)
//
if (!isRegArg && (size > 1))
{
// We also must update intArgRegNum so that we no longer try to
// allocate any new general purpose registers for args
//
isRegArg = intArgRegNum < maxRegArgs; // the split-struct case.
nextOtherRegNum = REG_STK;
}
}
#else // not TARGET_ARM or TARGET_ARM64 or TARGET_LOONGARCH64
#if defined(UNIX_AMD64_ABI)
// Here a struct can be passed in register following the classifications of its members and size.
// Now make sure there are actually enough registers to do so.
if (isStructArg)
{
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
structIntRegs++;
}
else if (structDesc.IsSseSlot(i))
{
structFloatRegs++;
}
}
isRegArg = ((nextFltArgRegNum + structFloatRegs) <= MAX_FLOAT_REG_ARG) &&
((intArgRegNum + structIntRegs) <= MAX_REG_ARG);
}
else
{
if (passUsingFloatRegs)
{
isRegArg = nextFltArgRegNum < MAX_FLOAT_REG_ARG;
}
else
{
isRegArg = intArgRegNum < MAX_REG_ARG;
}
}
#else // !defined(UNIX_AMD64_ABI)
isRegArg = (intArgRegNum + (size - 1)) < maxRegArgs;
#endif // !defined(UNIX_AMD64_ABI)
#endif // TARGET_ARM
}
else
{
isRegArg = false;
}
// If there are nonstandard args (outside the calling convention), they were inserted above
// and noted in a table so we can recognize them here and build their argInfo.
//
// They should not affect the placement of any other args or stack space required.
// Example: on AMD64 R10 and R11 are used for indirect VSD (generic interface) and cookie calls.
bool isNonStandard = nonStandardArgs.Find(argx, &nonStdRegNum, &nonStandardArgKind);
if (isNonStandard)
{
isRegArg = (nonStdRegNum != REG_STK);
}
else if (call->IsTailCallViaJitHelper())
{
// We have already (before calling fgMorphArgs()) appended the 4 special args
// required by the x86 tailcall helper. These args are required to go on the
// stack. Force them to the stack here.
assert(numArgs >= 4);
if (argIndex >= numArgs - 4)
{
isRegArg = false;
}
}
// Now we know if the argument goes in registers or not and how big it is.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
// If we ever allocate a floating point argument to the stack, then all
// subsequent HFA/float/double arguments go on the stack.
if (!isRegArg && passUsingFloatRegs)
{
for (; fltArgRegNum < MAX_FLOAT_REG_ARG; ++fltArgRegNum)
{
fltArgSkippedRegMask |= genMapArgNumToRegMask(fltArgRegNum, TYP_FLOAT);
}
}
// If we think we're going to split a struct between integer registers and the stack, check to
// see if we've already assigned a floating-point arg to the stack.
if (isRegArg && // We decided above to use a register for the argument
!passUsingFloatRegs && // We're using integer registers
(intArgRegNum + size > MAX_REG_ARG) && // We're going to split a struct type onto registers and stack
anyFloatStackArgs) // We've already used the stack for a floating-point argument
{
isRegArg = false; // Change our mind; don't pass this struct partially in registers
// Skip the rest of the integer argument registers
for (; intArgRegNum < MAX_REG_ARG; ++intArgRegNum)
{
argSkippedRegMask |= genMapArgNumToRegMask(intArgRegNum, TYP_I_IMPL);
}
}
#endif // TARGET_ARM
// Now create the fgArgTabEntry.
fgArgTabEntry* newArgEntry;
if (isRegArg)
{
regNumber nextRegNum = REG_STK;
#if defined(UNIX_AMD64_ABI)
regNumber nextOtherRegNum = REG_STK;
unsigned int structFloatRegs = 0;
unsigned int structIntRegs = 0;
#endif // defined(UNIX_AMD64_ABI)
if (isNonStandard)
{
nextRegNum = nonStdRegNum;
}
#if defined(UNIX_AMD64_ABI)
else if (isStructArg && structDesc.passedInRegisters)
{
// It is a struct passed in registers. Assign the next available register.
assert((structDesc.eightByteCount <= 2) && "Too many eightbytes.");
regNumber* nextRegNumPtrs[2] = {&nextRegNum, &nextOtherRegNum};
for (unsigned int i = 0; i < structDesc.eightByteCount; i++)
{
if (structDesc.IsIntegralSlot(i))
{
*nextRegNumPtrs[i] = genMapIntRegArgNumToRegNum(intArgRegNum + structIntRegs);
++structIntRegs;
}
else if (structDesc.IsSseSlot(i))
{
*nextRegNumPtrs[i] = genMapFloatRegArgNumToRegNum(nextFltArgRegNum + structFloatRegs);
++structFloatRegs;
}
}
}
#endif // defined(UNIX_AMD64_ABI)
else
{
// fill in or update the argInfo table
nextRegNum = passUsingFloatRegs ? genMapFloatRegArgNumToRegNum(nextFltArgRegNum)
: genMapIntRegArgNumToRegNum(intArgRegNum);
}
#ifdef TARGET_AMD64
#ifndef UNIX_AMD64_ABI
assert(size == 1);
#endif
#endif
// This is a register argument - put it in the table
newArgEntry =
call->fgArgInfo->AddRegArg(argIndex, argx, args, nextRegNum, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg UNIX_LOONGARCH64_ONLY_ARG(nextOtherRegNum)
UNIX_AMD64_ABI_ONLY_ARG(nextOtherRegNum)
UNIX_AMD64_ABI_ONLY_ARG(structIntRegs)
UNIX_AMD64_ABI_ONLY_ARG(structFloatRegs)
UNIX_AMD64_ABI_ONLY_ARG(&structDesc));
newArgEntry->SetIsBackFilled(isBackFilled);
// Set up the next intArgRegNum and fltArgRegNum values.
if (!isBackFilled)
{
#if defined(TARGET_LOONGARCH64)
// Increment intArgRegNum by 'size' registers
if (!isNonStandard)
{
if ((size > 1) && ((intArgRegNum + 1) == maxRegArgs) && (nextOtherRegNum == REG_STK))
{
assert(!passUsingFloatRegs);
assert(size == 2);
intArgRegNum = maxRegArgs;
}
else if ((floatFieldFlags & STRUCT_HAS_FLOAT_FIELDS_MASK) == 0x0)
{
if (passUsingFloatRegs)
{
fltArgRegNum += 1;
}
else
{
intArgRegNum += size;
}
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0)
{
structBaseType = structSize == 8 ? TYP_DOUBLE : TYP_FLOAT;
fltArgRegNum += 1;
newArgEntry->structFloatFieldType[0] = structBaseType;
}
else if ((floatFieldFlags & (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_SECOND)) != 0)
{
fltArgRegNum += 1;
intArgRegNum += 1;
if ((floatFieldFlags & STRUCT_FLOAT_FIELD_FIRST) != 0)
{
newArgEntry->structFloatFieldType[0] =
(floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
newArgEntry->structFloatFieldType[1] =
(floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
}
else
{
newArgEntry->structFloatFieldType[0] =
(floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
newArgEntry->structFloatFieldType[1] =
(floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
}
}
else if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
{
fltArgRegNum += 2;
newArgEntry->structFloatFieldType[0] =
(floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
newArgEntry->structFloatFieldType[1] =
(floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
}
}
#else
#if defined(UNIX_AMD64_ABI)
if (isStructArg)
{
// For this case, we've already set the regNums in the argTabEntry
intArgRegNum += structIntRegs;
fltArgRegNum += structFloatRegs;
}
else
#endif // defined(UNIX_AMD64_ABI)
{
if (!isNonStandard)
{
#if FEATURE_ARG_SPLIT
// Check for a split (partially enregistered) struct
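// For example, on ARM32 a 16-byte struct whose first slot lands in r3 is split:
// r3 holds the first 4 bytes and the remaining 12 bytes go to the outgoing stack area.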
if (compFeatureArgSplit() && !passUsingFloatRegs && ((intArgRegNum + size) > MAX_REG_ARG))
{
// This indicates a partial enregistration of a struct type
assert((isStructArg) || argx->OperIs(GT_FIELD_LIST) || argx->OperIsCopyBlkOp() ||
(argx->gtOper == GT_COMMA && (argx->gtFlags & GTF_ASG)));
unsigned numRegsPartial = MAX_REG_ARG - intArgRegNum;
assert((unsigned char)numRegsPartial == numRegsPartial);
call->fgArgInfo->SplitArg(argIndex, numRegsPartial, size - numRegsPartial);
}
#endif // FEATURE_ARG_SPLIT
if (passUsingFloatRegs)
{
fltArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
// Whenever we pass an integer register argument
// we skip the corresponding floating point register argument
intArgRegNum = min(intArgRegNum + size, MAX_REG_ARG);
#endif // WINDOWS_AMD64_ABI
// No supported architecture supports partial structs using float registers.
assert(fltArgRegNum <= MAX_FLOAT_REG_ARG);
}
else
{
// Increment intArgRegNum by 'size' registers
intArgRegNum += size;
#ifdef WINDOWS_AMD64_ABI
fltArgRegNum = min(fltArgRegNum + size, MAX_FLOAT_REG_ARG);
#endif // WINDOWS_AMD64_ABI
}
}
}
#endif // defined(TARGET_LOONGARCH64)
}
}
else // We have an argument that is not passed in a register
{
// This is a stack argument - put it in the table
newArgEntry = call->fgArgInfo->AddStkArg(argIndex, argx, args, size, byteSize, argAlignBytes, isStructArg,
isFloatHfa, callIsVararg);
#ifdef UNIX_AMD64_ABI
// TODO-Amd64-Unix-CQ: This is temporary (see also in fgMorphArgs).
if (structDesc.passedInRegisters)
{
newArgEntry->structDesc.CopyFrom(structDesc);
}
#endif
}
newArgEntry->nonStandardArgKind = nonStandardArgKind;
if (GlobalJitOptions::compFeatureHfa)
{
if (isHfaArg)
{
newArgEntry->SetHfaType(hfaType, hfaSlots);
}
}
newArgEntry->SetMultiRegNums();
noway_assert(newArgEntry != nullptr);
if (newArgEntry->isStruct)
{
newArgEntry->passedByRef = passStructByRef;
newArgEntry->argType = (structBaseType == TYP_UNKNOWN) ? argx->TypeGet() : structBaseType;
}
else
{
newArgEntry->argType = argx->TypeGet();
}
DEBUG_ARG_SLOTS_ONLY(argSlots += size;)
} // end foreach argument loop
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgInitArgInfo:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
}
//------------------------------------------------------------------------
// fgMorphArgs: Walk and transform (morph) the arguments of a call
//
// Arguments:
// callNode - the call for which we are doing the argument morphing
//
// Return Value:
// Like most morph methods, this method returns the morphed node,
// though in this case there are currently no scenarios where the
// node itself is re-created.
//
// Notes:
// This calls fgInitArgInfo to create the 'fgArgInfo' for the call.
// If it has already been created, that method will simply return.
//
// This method changes the state of the call node. It uses the existence
// of gtCallLateArgs (the late arguments list) to determine if it has
// already done the first round of morphing.
//
// The first time it is called (i.e. during global morphing), this method
// computes the "late arguments". This is when it determines which arguments
// need to be evaluated to temps prior to the main argument setup, and which
// can be directly evaluated into the argument location. It also creates a
// second argument list (gtCallLateArgs) that does the final placement of the
// arguments, e.g. into registers or onto the stack.
//
// The "non-late arguments", aka the gtCallArgs, are doing the in-order
// evaluation of the arguments that might have side-effects, such as embedded
// assignments, calls or possible throws. In these cases, it and earlier
// arguments must be evaluated to temps.
//
// On targets with a fixed outgoing argument area (FEATURE_FIXED_OUT_ARGS),
// if we have any nested calls, we need to defer the copying of the argument
// into the fixed argument area until after the call. If the argument did not
// otherwise need to be computed into a temp, it is moved to gtCallLateArgs and
// replaced in the "early" arg list (gtCallArgs) with a placeholder node.
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTreeCall* Compiler::fgMorphArgs(GenTreeCall* call)
{
GenTreeCall::Use* args;
GenTree* argx;
GenTreeFlags flagsSummary = GTF_EMPTY;
unsigned argIndex = 0;
DEBUG_ARG_SLOTS_ONLY(unsigned argSlots = 0;)
bool reMorphing = call->AreArgsComplete();
// Set up the fgArgInfo.
fgInitArgInfo(call);
JITDUMP("%sMorphing args for %d.%s:\n", (reMorphing) ? "Re" : "", call->gtTreeID, GenTree::OpName(call->gtOper));
// If we are remorphing, process the late arguments (which were determined by a previous caller).
if (reMorphing)
{
for (GenTreeCall::Use& use : call->LateArgs())
{
use.SetNode(fgMorphTree(use.GetNode()));
flagsSummary |= use.GetNode()->gtFlags;
}
assert(call->fgArgInfo != nullptr);
}
call->fgArgInfo->RemorphReset();
// First we morph the argument subtrees ('this' pointer, arguments, etc.).
// During the first call to fgMorphArgs we also record the
// information about late arguments we have in 'fgArgInfo'.
// This information is used later to construct the gtCallLateArgs list.
// Process the 'this' argument value, if present.
if (call->gtCallThisArg != nullptr)
{
argx = call->gtCallThisArg->GetNode();
fgArgTabEntry* thisArgEntry = call->fgArgInfo->GetArgEntry(0, reMorphing);
argx = fgMorphTree(argx);
call->gtCallThisArg->SetNode(argx);
// This is a register argument - possibly update it in the table.
call->fgArgInfo->UpdateRegArg(thisArgEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
if (!reMorphing && call->IsExpandedEarly() && call->IsVirtualVtable())
{
if (!argx->OperIsLocal())
{
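// The expanded vtable call sequence references 'this' again to fetch the method table,
// so a non-local 'this' must be evaluated into a temp to avoid re-evaluating it.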
thisArgEntry->needTmp = true;
call->fgArgInfo->SetNeedsTemps();
}
}
assert(argIndex == 0);
argIndex++;
DEBUG_ARG_SLOTS_ONLY(argSlots++;)
}
// Note that this name is a bit of a misnomer - it indicates that there are struct args
// that occupy more than a single slot and are passed by value (not necessarily in regs).
bool hasMultiregStructArgs = false;
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
GenTree** parentArgx = &args->NodeRef();
fgArgTabEntry* argEntry = call->fgArgInfo->GetArgEntry(argIndex, reMorphing);
// Morph the arg node, and update the parent and argEntry pointers.
argx = *parentArgx;
argx = fgMorphTree(argx);
*parentArgx = argx;
assert(argx == args->GetNode());
DEBUG_ARG_SLOTS_ONLY(unsigned size = argEntry->getSize();)
CORINFO_CLASS_HANDLE copyBlkClass = NO_CLASS_HANDLE;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
if (argEntry->GetByteAlignment() == 2 * TARGET_POINTER_SIZE)
{
if (argSlots % 2 == 1)
{
argSlots++;
}
}
}
#endif // DEBUG
if (argEntry->isNonStandard() && argEntry->isPassedInRegisters())
{
// We need to update the node field for this nonStandard arg here
// as it may have been changed by the call to fgMorphTree.
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
flagsSummary |= argx->gtFlags;
continue;
}
DEBUG_ARG_SLOTS_ASSERT(size != 0);
DEBUG_ARG_SLOTS_ONLY(argSlots += argEntry->getSlotCount();)
if (argx->IsLocalAddrExpr() != nullptr)
{
argx->gtType = TYP_I_IMPL;
}
// Get information about this argument.
var_types hfaType = argEntry->GetHfaType();
bool isHfaArg = (hfaType != TYP_UNDEF);
bool passUsingFloatRegs = argEntry->isPassedInFloatRegisters();
unsigned structSize = 0;
// Struct arguments may be morphed into a node that is not a struct type.
// In such case the fgArgTabEntry keeps track of whether the original node (before morphing)
// was a struct and the struct classification.
bool isStructArg = argEntry->isStruct;
GenTree* argObj = argx->gtEffectiveVal(true /*commaOnly*/);
if (isStructArg && varTypeIsStruct(argObj) && !argObj->OperIs(GT_ASG, GT_MKREFANY, GT_FIELD_LIST, GT_ARGPLACE))
{
CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(argObj);
unsigned originalSize;
if (argObj->TypeGet() == TYP_STRUCT)
{
if (argObj->OperIs(GT_OBJ))
{
// Get the size off the OBJ node.
originalSize = argObj->AsObj()->GetLayout()->GetSize();
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
else
{
// We have a BADCODE assert for this in fgInitArgInfo.
assert(argObj->OperIs(GT_LCL_VAR));
originalSize = lvaGetDesc(argObj->AsLclVarCommon())->lvExactSize;
}
}
else
{
originalSize = genTypeSize(argx);
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
unsigned roundupSize = (unsigned)roundUp(originalSize, TARGET_POINTER_SIZE);
var_types structBaseType = argEntry->argType;
// First, handle the case where the argument is passed by reference.
if (argEntry->passedByRef)
{
DEBUG_ARG_SLOTS_ASSERT(size == 1);
copyBlkClass = objClass;
#ifdef UNIX_AMD64_ABI
assert(!"Structs are not passed by reference on x64/ux");
#endif // UNIX_AMD64_ABI
}
else // This is passed by value.
{
#if defined(TARGET_LOONGARCH64)
// For LoongArch64 the struct {float a; float b;} can be passed by two float registers.
DEBUG_ARG_SLOTS_ASSERT((size == roundupSize / TARGET_POINTER_SIZE) ||
((structBaseType == TYP_STRUCT) && (originalSize == TARGET_POINTER_SIZE) &&
(size == 2) && (size == argEntry->numRegs)));
#else
// Check to see if we can transform this into load of a primitive type.
// 'size' must be the number of pointer sized items
DEBUG_ARG_SLOTS_ASSERT(size == roundupSize / TARGET_POINTER_SIZE);
#endif
structSize = originalSize;
unsigned passingSize = originalSize;
// Check to see if we can transform this struct load (GT_OBJ) into a GT_IND of the appropriate size.
// When it can do this is platform-dependent:
// - In general, it can be done for power of 2 structs that fit in a single register.
// - For ARM and ARM64 it must also be a non-HFA struct, or have a single field.
// - This is irrelevant for X86, since structs are always passed by value on the stack.
GenTree* lclVar = fgIsIndirOfAddrOfLocal(argObj);
bool canTransform = false;
if (structBaseType != TYP_STRUCT)
{
if (isPow2(passingSize))
{
canTransform = (!argEntry->IsHfaArg() || (passingSize == genTypeSize(argEntry->GetHfaType())));
}
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
// For ARM64 or AMD64/UX we can pass non-power-of-2 structs in a register, but we can
// only transform in that case if the arg is a local.
// TODO-CQ: This transformation should be applicable in general, not just for the ARM64
// or UNIX_AMD64_ABI cases where they will be passed in registers.
else
{
canTransform = (lclVar != nullptr);
passingSize = genTypeSize(structBaseType);
}
#endif // TARGET_ARM64 || UNIX_AMD64_ABI || TARGET_LOONGARCH64
}
if (!canTransform)
{
#if defined(TARGET_AMD64)
#ifndef UNIX_AMD64_ABI
// On Windows structs are always copied and passed by reference (handled above) unless they are
// passed by value in a single register.
assert(size == 1);
copyBlkClass = objClass;
#else // UNIX_AMD64_ABI
// On Unix, structs are always passed by value.
// We only need a copy if we have one of the following:
// - The sizes don't match for a non-lclVar argument.
// - We have a known struct type (e.g. SIMD) that requires multiple registers.
// TODO-Amd64-Unix-Throughput: We don't need to keep the structDesc in the argEntry if it's not
// actually passed in registers.
if (argEntry->isPassedInRegisters())
{
if (argObj->OperIs(GT_OBJ))
{
if (passingSize != structSize)
{
copyBlkClass = objClass;
}
}
else if (lclVar == nullptr)
{
// This should only be the case of a value directly producing a known struct type.
assert(argObj->TypeGet() != TYP_STRUCT);
if (argEntry->numRegs > 1)
{
copyBlkClass = objClass;
}
}
}
#endif // UNIX_AMD64_ABI
#elif defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
if ((passingSize != structSize) && (lclVar == nullptr))
{
copyBlkClass = objClass;
}
#endif
#ifdef TARGET_ARM
// TODO-1stClassStructs: Unify these conditions across targets.
if (((lclVar != nullptr) &&
(lvaGetPromotionType(lclVar->AsLclVarCommon()->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)) ||
((argObj->OperIs(GT_OBJ)) && (passingSize != structSize)))
{
copyBlkClass = objClass;
}
if (structSize < TARGET_POINTER_SIZE)
{
copyBlkClass = objClass;
}
#endif // TARGET_ARM
}
else
{
// We have a struct argument that fits into a register, and it is either a power of 2,
// or a local.
// Change our argument, as needed, into a value of the appropriate type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef TARGET_ARM
DEBUG_ARG_SLOTS_ASSERT((size == 1) || ((structBaseType == TYP_DOUBLE) && (size == 2)));
#else
DEBUG_ARG_SLOTS_ASSERT((size == 1) || (varTypeIsSIMD(structBaseType) &&
size == (genTypeSize(structBaseType) / REGSIZE_BYTES)));
#endif
assert((structBaseType != TYP_STRUCT) && (genTypeSize(structBaseType) >= originalSize));
if (argObj->OperIs(GT_OBJ))
{
argObj->ChangeOper(GT_IND);
// Now see if we can fold *(&X) into X
if (argObj->AsOp()->gtOp1->gtOper == GT_ADDR)
{
GenTree* temp = argObj->AsOp()->gtOp1->AsOp()->gtOp1;
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (argObj->gtFlags & GTF_DONT_CSE);
DEBUG_DESTROY_NODE(argObj->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(argObj); // GT_IND
argObj = temp;
*parentArgx = temp;
argx = temp;
}
}
if (argObj->gtOper == GT_LCL_VAR)
{
unsigned lclNum = argObj->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
if (varDsc->lvFieldCnt == 1)
{
// get the first and only promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
if (genTypeSize(fieldVarDsc->TypeGet()) >= originalSize)
{
// we will use the first and only promoted field
argObj->AsLclVarCommon()->SetLclNum(varDsc->lvFieldLclStart);
if (varTypeIsEnregisterable(fieldVarDsc->TypeGet()) &&
(genTypeSize(fieldVarDsc->TypeGet()) == originalSize))
{
// Just use the existing field's type
argObj->gtType = fieldVarDsc->TypeGet();
}
else
{
// Can't use the existing field's type, so use GT_LCL_FLD to swizzle
// to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()));
assert(copyBlkClass == NO_CLASS_HANDLE);
}
else
{
// use GT_LCL_FLD to swizzle the single field struct to a new type
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// The struct fits into a single register, but it has been promoted into its
// constituent fields, and so we have to re-assemble it
copyBlkClass = objClass;
}
}
else if (genTypeSize(varDsc->TypeGet()) != genTypeSize(structBaseType))
{
// Not a promoted struct, so just swizzle the type by using GT_LCL_FLD
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::SwizzleArg));
argObj->ChangeOper(GT_LCL_FLD);
argObj->gtType = structBaseType;
}
}
else
{
// Not a GT_LCL_VAR, so we can just change the type on the node
argObj->gtType = structBaseType;
}
assert(varTypeIsEnregisterable(argObj->TypeGet()) ||
((copyBlkClass != NO_CLASS_HANDLE) && varTypeIsEnregisterable(structBaseType)));
}
#if !defined(UNIX_AMD64_ABI) && !defined(TARGET_ARMARCH) && !defined(TARGET_LOONGARCH64)
// TODO-CQ-XARCH: there is no need for a temp copy if we improve our code generation in
// `genPutStructArgStk` for xarch like we did for Arm/Arm64.
// We still have a struct unless we converted the GT_OBJ into a GT_IND above...
if (isHfaArg && passUsingFloatRegs)
{
}
else if (structBaseType == TYP_STRUCT)
{
// If the valuetype size is not a multiple of TARGET_POINTER_SIZE,
// we must copyblk to a temp before doing the obj to avoid
// the obj reading memory past the end of the valuetype
CLANG_FORMAT_COMMENT_ANCHOR;
if (roundupSize > originalSize)
{
copyBlkClass = objClass;
// There are a few special cases where we can omit using a CopyBlk
// where we normally would need to use one.
if (argObj->OperIs(GT_OBJ) &&
argObj->AsObj()->gtGetOp1()->IsLocalAddrExpr() != nullptr) // Is the source a LclVar?
{
copyBlkClass = NO_CLASS_HANDLE;
}
}
}
#endif // !UNIX_AMD64_ABI
}
}
if (argEntry->isPassedInRegisters())
{
call->fgArgInfo->UpdateRegArg(argEntry, argx, reMorphing);
}
else
{
call->fgArgInfo->UpdateStkArg(argEntry, argx, reMorphing);
}
if (copyBlkClass != NO_CLASS_HANDLE)
{
fgMakeOutgoingStructArgCopy(call, args, copyBlkClass);
}
if (argx->gtOper == GT_MKREFANY)
{
// 'Lower' the MKREFANY tree and insert it.
noway_assert(!reMorphing);
#ifdef TARGET_X86
// Build the mkrefany as a GT_FIELD_LIST
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fieldList->AddField(this, argx->AsOp()->gtGetOp1(), OFFSETOF__CORINFO_TypedReference__dataPtr, TYP_BYREF);
fieldList->AddField(this, argx->AsOp()->gtGetOp2(), OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
#else // !TARGET_X86
// Get a new temp
// Here we don't need the unsafe value class check, since the address of the temp is used only in the mkrefany
unsigned tmp = lvaGrabTemp(true DEBUGARG("by-value mkrefany struct argument"));
lvaSetStruct(tmp, impGetRefAnyClass(), false);
// Build the mkrefany as a comma node:
// (tmp.ptr=argx),(tmp.type=handle)
GenTreeLclFld* destPtrSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__dataPtr);
GenTreeLclFld* destTypeSlot = gtNewLclFldNode(tmp, TYP_I_IMPL, OFFSETOF__CORINFO_TypedReference__type);
destPtrSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
destPtrSlot->gtFlags |= GTF_VAR_DEF;
destTypeSlot->SetFieldSeq(GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField()));
destTypeSlot->gtFlags |= GTF_VAR_DEF;
GenTree* asgPtrSlot = gtNewAssignNode(destPtrSlot, argx->AsOp()->gtOp1);
GenTree* asgTypeSlot = gtNewAssignNode(destTypeSlot, argx->AsOp()->gtOp2);
GenTree* asg = gtNewOperNode(GT_COMMA, TYP_VOID, asgPtrSlot, asgTypeSlot);
// Change the expression to "(tmp=val)"
args->SetNode(asg);
// EvalArgsToTemps will cause tmp to actually get loaded as the argument
call->fgArgInfo->EvalToTmp(argEntry, tmp, asg);
lvaSetVarAddrExposed(tmp DEBUGARG(AddressExposedReason::TOO_CONSERVATIVE));
#endif // !TARGET_X86
}
#if FEATURE_MULTIREG_ARGS
if (isStructArg)
{
if (((argEntry->numRegs + argEntry->GetStackSlotsNumber()) > 1) ||
(isHfaArg && argx->TypeGet() == TYP_STRUCT))
{
hasMultiregStructArgs = true;
}
}
#ifdef TARGET_ARM
else if ((argEntry->argType == TYP_LONG) || (argEntry->argType == TYP_DOUBLE))
{
assert((argEntry->numRegs == 2) || (argEntry->numSlots == 2));
}
#endif
else
{
// We must have exactly one register or slot.
assert(((argEntry->numRegs == 1) && (argEntry->GetStackSlotsNumber() == 0)) ||
((argEntry->numRegs == 0) && (argEntry->GetStackSlotsNumber() == 1)));
}
#endif
#if defined(TARGET_X86)
if (isStructArg)
{
GenTree* lclNode = argx->OperIs(GT_LCL_VAR) ? argx : fgIsIndirOfAddrOfLocal(argx);
if ((lclNode != nullptr) &&
(lvaGetPromotionType(lclNode->AsLclVarCommon()->GetLclNum()) == Compiler::PROMOTION_TYPE_INDEPENDENT))
{
// Make a GT_FIELD_LIST of the field lclVars.
GenTreeLclVarCommon* lcl = lclNode->AsLclVarCommon();
LclVarDsc* varDsc = lvaGetDesc(lcl);
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
fgArgTabEntry* fp = gtArgEntryByNode(call, argx);
args->SetNode(fieldList);
assert(fp->GetNode() == fieldList);
for (unsigned fieldLclNum = varDsc->lvFieldLclStart;
fieldLclNum < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* fieldLcl;
if (fieldLclNum == varDsc->lvFieldLclStart)
{
lcl->SetLclNum(fieldLclNum);
lcl->SetOperResetFlags(GT_LCL_VAR);
lcl->gtType = fieldVarDsc->TypeGet();
fieldLcl = lcl;
}
else
{
fieldLcl = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
}
fieldList->AddField(this, fieldLcl, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
}
}
}
#endif // TARGET_X86
flagsSummary |= args->GetNode()->gtFlags;
} // end foreach argument loop
if (!reMorphing)
{
call->fgArgInfo->ArgsComplete();
}
/* Process the function address, if indirect call */
if (call->gtCallType == CT_INDIRECT)
{
call->gtCallAddr = fgMorphTree(call->gtCallAddr);
// Const CSE may create an assignment node here
flagsSummary |= call->gtCallAddr->gtFlags;
}
#if FEATURE_FIXED_OUT_ARGS
// Record the outgoing argument size. If the call is a fast tail
// call, it will setup its arguments in incoming arg area instead
// of the out-going arg area, so we don't need to track the
// outgoing arg size.
if (!call->IsFastTailCall())
{
#if defined(UNIX_AMD64_ABI)
// This is currently required for the UNIX ABI to work correctly.
opts.compNeedToAlignFrame = true;
#endif // UNIX_AMD64_ABI
const unsigned outgoingArgSpaceSize = GetOutgoingArgByteSize(call->fgArgInfo->GetNextSlotByteOffset());
#if defined(DEBUG_ARG_SLOTS)
unsigned preallocatedArgCount = 0;
if (!compMacOsArm64Abi())
{
preallocatedArgCount = call->fgArgInfo->GetNextSlotNum();
assert(outgoingArgSpaceSize == preallocatedArgCount * REGSIZE_BYTES);
}
#endif
call->fgArgInfo->SetOutArgSize(max(outgoingArgSpaceSize, MIN_ARG_AREA_FOR_CALL));
#ifdef DEBUG
if (verbose)
{
const fgArgInfo* argInfo = call->fgArgInfo;
#if defined(DEBUG_ARG_SLOTS)
if (!compMacOsArm64Abi())
{
printf("argSlots=%d, preallocatedArgCount=%d, nextSlotNum=%d, nextSlotByteOffset=%d, "
"outgoingArgSpaceSize=%d\n",
argSlots, preallocatedArgCount, argInfo->GetNextSlotNum(), argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
else
{
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
}
#else
printf("nextSlotByteOffset=%d, outgoingArgSpaceSize=%d\n", argInfo->GetNextSlotByteOffset(),
outgoingArgSpaceSize);
#endif
}
#endif
}
#endif // FEATURE_FIXED_OUT_ARGS
// Clear the ASG and EXCEPT (if possible) flags on the call node
call->gtFlags &= ~GTF_ASG;
if (!call->OperMayThrow(this))
{
call->gtFlags &= ~GTF_EXCEPT;
}
// Union in the side effect flags from the call's operands
call->gtFlags |= flagsSummary & GTF_ALL_EFFECT;
// If we are remorphing or don't have any register arguments or other arguments that need
// temps, then we don't need to call SortArgs() and EvalArgsToTemps().
//
if (!reMorphing && (call->fgArgInfo->HasRegArgs() || call->fgArgInfo->NeedsTemps()))
{
// Do the 'defer or eval to temp' analysis.
call->fgArgInfo->SortArgs();
call->fgArgInfo->EvalArgsToTemps();
}
if (hasMultiregStructArgs)
{
fgMorphMultiregStructArgs(call);
}
#ifdef DEBUG
if (verbose)
{
JITDUMP("ArgTable for %d.%s after fgMorphArgs:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
call->fgArgInfo->Dump(this);
JITDUMP("\n");
}
#endif
return call;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArgs: Locate the TYP_STRUCT arguments and
// call fgMorphMultiregStructArg on each of them.
//
// Arguments:
// call : a GenTreeCall node that has one or more TYP_STRUCT arguments.
//
// Notes:
// We only call fgMorphMultiregStructArg for struct arguments that are not passed as simple types.
// It will ensure that the struct arguments are in the correct form.
// If this method fails to find any TYP_STRUCT arguments it will assert.
//
void Compiler::fgMorphMultiregStructArgs(GenTreeCall* call)
{
bool foundStructArg = false;
GenTreeFlags flagsSummary = GTF_EMPTY;
#ifdef TARGET_X86
assert(!"Logic error: no MultiregStructArgs for X86");
#endif
#if defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)
assert(!"Logic error: no MultiregStructArgs for Windows X64 ABI");
#endif
for (GenTreeCall::Use& use : call->Args())
{
// For late arguments the arg tree that is overridden is in the gtCallLateArgs list.
// For such late args the gtCallArgList contains the setup arg node (evaluating the arg.)
// The tree from the gtCallLateArgs list is passed to the callee. The fgArgEntry node contains the mapping
// between the nodes in both lists. If the arg is not a late arg, the fgArgEntry->node points to itself,
// otherwise points to the list in the late args list.
bool isLateArg = (use.GetNode()->gtFlags & GTF_LATE_ARG) != 0;
fgArgTabEntry* fgEntryPtr = gtArgEntryByNode(call, use.GetNode());
assert(fgEntryPtr != nullptr);
GenTree* argx = fgEntryPtr->GetNode();
GenTreeCall::Use* lateUse = nullptr;
GenTree* lateNode = nullptr;
if (isLateArg)
{
for (GenTreeCall::Use& lateArgUse : call->LateArgs())
{
GenTree* argNode = lateArgUse.GetNode();
if (argx == argNode)
{
lateUse = &lateArgUse;
lateNode = argNode;
break;
}
}
assert((lateUse != nullptr) && (lateNode != nullptr));
}
if (!fgEntryPtr->isStruct)
{
continue;
}
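// Total number of slots (registers plus stack slots) that this argument occupies.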
unsigned size = (fgEntryPtr->numRegs + fgEntryPtr->GetStackSlotsNumber());
if ((size > 1) || (fgEntryPtr->IsHfaArg() && argx->TypeGet() == TYP_STRUCT))
{
foundStructArg = true;
if (varTypeIsStruct(argx) && !argx->OperIs(GT_FIELD_LIST))
{
if (fgEntryPtr->IsHfaRegArg())
{
var_types hfaType = fgEntryPtr->GetHfaType();
unsigned structSize;
if (argx->OperIs(GT_OBJ))
{
structSize = argx->AsObj()->GetLayout()->GetSize();
}
else if (varTypeIsSIMD(argx))
{
structSize = genTypeSize(argx);
}
else
{
assert(argx->OperIs(GT_LCL_VAR));
structSize = lvaGetDesc(argx->AsLclVar())->lvExactSize;
}
assert(structSize > 0);
if (structSize == genTypeSize(hfaType))
{
if (argx->OperIs(GT_OBJ))
{
argx->SetOper(GT_IND);
}
argx->gtType = hfaType;
}
}
GenTree* newArgx = fgMorphMultiregStructArg(argx, fgEntryPtr);
// Did we replace 'argx' with a new tree?
if (newArgx != argx)
{
// link the new arg node into either the late arg list or the gtCallArgs list
if (isLateArg)
{
lateUse->SetNode(newArgx);
}
else
{
use.SetNode(newArgx);
}
assert(fgEntryPtr->GetNode() == newArgx);
}
}
}
}
// We should only call this method when we actually have one or more multireg struct args
assert(foundStructArg);
// Update the flags
call->gtFlags |= (flagsSummary & GTF_ALL_EFFECT);
}
//-----------------------------------------------------------------------------
// fgMorphMultiregStructArg: Given a TYP_STRUCT arg from a call argument list,
// morph the argument as needed to be passed correctly.
//
// Arguments:
// arg - A GenTree node containing a TYP_STRUCT arg
// fgEntryPtr - the fgArgTabEntry information for the current 'arg'
//
// Notes:
// The arg must be a GT_OBJ or GT_LCL_VAR or GT_LCL_FLD of TYP_STRUCT.
// If 'arg' is a lclVar passed on the stack, we will ensure that any lclVars that must be on the
// stack are marked as doNotEnregister, and then we return.
//
// If it is passed by register, we mutate the argument into the GT_FIELD_LIST form
// which is only used for struct arguments.
//
// If arg is a LclVar we check if it is struct promoted and has the right number of fields
// and if they are at the appropriate offsets we will use the struct promoted fields
// in the GT_FIELD_LIST nodes that we create.
// If we have a GT_LCL_VAR that isn't struct promoted or doesn't meet the requirements,
// we will use a set of GT_LCL_FLD nodes to access the various portions of the struct;
// this also forces the struct to be stack allocated into the local frame.
// For the GT_OBJ case we will clone the address expression and generate two (or more)
// indirections.
//
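// Illustrative sketch (not exhaustive): for a two-register struct argument,
//   OBJ(ADDR(LCL_VAR V01))
// becomes, roughly,
//   FIELD_LIST(LCL_FLD V01 [+0], LCL_FLD V01 [+TARGET_POINTER_SIZE])
// when V01 is not suitably promoted, or a FIELD_LIST of the promoted field LCL_VARs when it is.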
GenTree* Compiler::fgMorphMultiregStructArg(GenTree* arg, fgArgTabEntry* fgEntryPtr)
{
assert(varTypeIsStruct(arg->TypeGet()));
#if !defined(TARGET_ARMARCH) && !defined(UNIX_AMD64_ABI) && !defined(TARGET_LOONGARCH64)
NYI("fgMorphMultiregStructArg requires implementation for this target");
#endif
#ifdef TARGET_ARM
if ((fgEntryPtr->IsSplit() && fgEntryPtr->GetStackSlotsNumber() + fgEntryPtr->numRegs > 4) ||
(!fgEntryPtr->IsSplit() && fgEntryPtr->GetRegNum() == REG_STK))
#else
if (fgEntryPtr->GetRegNum() == REG_STK)
#endif
{
GenTreeLclVarCommon* lcl = nullptr;
GenTree* actualArg = arg->gtEffectiveVal();
if (actualArg->OperGet() == GT_OBJ)
{
if (actualArg->gtGetOp1()->OperIs(GT_ADDR) && actualArg->gtGetOp1()->gtGetOp1()->OperIs(GT_LCL_VAR))
{
lcl = actualArg->gtGetOp1()->gtGetOp1()->AsLclVarCommon();
}
}
else if (actualArg->OperGet() == GT_LCL_VAR)
{
lcl = actualArg->AsLclVarCommon();
}
if (lcl != nullptr)
{
if (lvaGetPromotionType(lcl->GetLclNum()) == PROMOTION_TYPE_INDEPENDENT)
{
arg = fgMorphLclArgToFieldlist(lcl);
}
else if (arg->TypeGet() == TYP_STRUCT)
{
// If this is a non-register struct, it must be referenced from memory.
if (!actualArg->OperIs(GT_OBJ))
{
// Create an Obj of the temp to use it as a call argument.
arg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, arg);
arg = gtNewObjNode(lvaGetStruct(lcl->GetLclNum()), arg);
}
// Its fields will need to be accessed by address.
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUG_ARG(DoNotEnregisterReason::IsStructArg));
}
}
return arg;
}
#if FEATURE_MULTIREG_ARGS
// Examine 'arg' and setup argValue objClass and structSize
//
const CORINFO_CLASS_HANDLE objClass = gtGetStructHandle(arg);
GenTree* argValue = arg; // normally argValue will be arg, but see right below
unsigned structSize = 0;
if (arg->TypeGet() != TYP_STRUCT)
{
structSize = genTypeSize(arg->TypeGet());
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else if (arg->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = arg->AsObj();
const ClassLayout* objLayout = argObj->GetLayout();
structSize = objLayout->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
// If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR.
GenTree* op1 = argObj->gtOp1;
if (op1->OperGet() == GT_ADDR)
{
GenTree* underlyingTree = op1->AsOp()->gtOp1;
// Only update to the same type.
if (underlyingTree->OperIs(GT_LCL_VAR))
{
const LclVarDsc* varDsc = lvaGetDesc(underlyingTree->AsLclVar());
if (ClassLayout::AreCompatible(varDsc->GetLayout(), objLayout))
{
argValue = underlyingTree;
}
}
}
}
else if (arg->OperGet() == GT_LCL_VAR)
{
LclVarDsc* varDsc = lvaGetDesc(arg->AsLclVarCommon());
structSize = varDsc->lvExactSize;
assert(structSize == info.compCompHnd->getClassSize(objClass));
}
else
{
structSize = info.compCompHnd->getClassSize(objClass);
}
var_types hfaType = TYP_UNDEF;
var_types elemType = TYP_UNDEF;
unsigned elemCount = 0;
unsigned elemSize = 0;
var_types type[MAX_ARG_REG_COUNT] = {}; // TYP_UNDEF = 0
hfaType = fgEntryPtr->GetHfaType();
if (varTypeIsValidHfaType(hfaType) && fgEntryPtr->isPassedInFloatRegisters())
{
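// For example, an HFA such as `struct { float a; float b; float c; }` gives
// elemType == TYP_FLOAT and elemCount == 3 here.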
elemType = hfaType;
elemSize = genTypeSize(elemType);
elemCount = structSize / elemSize;
assert(elemSize * elemCount == structSize);
for (unsigned inx = 0; inx < elemCount; inx++)
{
type[inx] = elemType;
}
}
else
{
assert(structSize <= MAX_ARG_REG_COUNT * TARGET_POINTER_SIZE);
BYTE gcPtrs[MAX_ARG_REG_COUNT];
info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
elemCount = roundUp(structSize, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
#ifdef TARGET_LOONGARCH64
// For LoongArch64's ABI, a struct whose size is TARGET_POINTER_SIZE
// may be passed in two registers,
// e.g. `struct {int a; float b;}` is passed in an integer register and a float register.
if (fgEntryPtr->numRegs == 2)
{
elemCount = 2;
}
#endif
for (unsigned inx = 0; inx < elemCount; inx++)
{
#if defined(UNIX_AMD64_ABI)
if (gcPtrs[inx] == TYPE_GC_NONE)
{
type[inx] = GetTypeFromClassificationAndSizes(fgEntryPtr->structDesc.eightByteClassifications[inx],
fgEntryPtr->structDesc.eightByteSizes[inx]);
}
else
#elif defined(TARGET_LOONGARCH64)
if (varTypeIsFloating(fgEntryPtr->structFloatFieldType[inx]) ||
(genTypeSize(fgEntryPtr->structFloatFieldType[inx]) == 4))
{
type[inx] = fgEntryPtr->structFloatFieldType[inx];
}
else
#endif // TARGET_LOONGARCH64
{
type[inx] = getJitGCType(gcPtrs[inx]);
}
}
#ifndef UNIX_AMD64_ABI
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
elemSize = TARGET_POINTER_SIZE;
// We can safely widen this to aligned bytes since we are loading from
// a GT_LCL_VAR or a GT_LCL_FLD which is properly padded and
// lives in the stack frame or will be a promoted field.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_LOONGARCH64
// For LoongArch64's ABI, a struct whose size is TARGET_POINTER_SIZE
// may be passed in two registers,
// e.g. `struct {int a; float b;}` is passed in an integer register and a float register.
structSize = elemCount * TARGET_POINTER_SIZE;
#endif
}
else // we must have a GT_OBJ
{
assert(argValue->OperGet() == GT_OBJ);
// We need to load the struct from an arbitrary address
// and we can't read past the end of the structSize,
// so we adjust the last load type here
//
unsigned remainingBytes = structSize % TARGET_POINTER_SIZE;
unsigned lastElem = elemCount - 1;
if (remainingBytes != 0)
{
switch (remainingBytes)
{
case 1:
type[lastElem] = TYP_BYTE;
break;
case 2:
type[lastElem] = TYP_SHORT;
break;
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
case 4:
type[lastElem] = TYP_INT;
break;
#endif // (TARGET_ARM64) || (UNIX_AMD64_ABI) || (TARGET_LOONGARCH64)
default:
noway_assert(!"NYI: odd sized struct in fgMorphMultiregStructArg");
break;
}
}
}
#endif // !UNIX_AMD64_ABI
}
// We should still have a TYP_STRUCT
assert(varTypeIsStruct(argValue->TypeGet()));
GenTreeFieldList* newArg = nullptr;
// Are we passing a struct LclVar?
//
if (argValue->OperGet() == GT_LCL_VAR)
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
// At this point any TYP_STRUCT LclVar must be an aligned struct
// or an HFA struct, both of which are passed by value.
//
assert((varDsc->lvSize() == elemCount * TARGET_POINTER_SIZE) || varDsc->lvIsHfa());
varDsc->lvIsMultiRegArg = true;
#ifdef DEBUG
if (verbose)
{
JITDUMP("Multireg struct argument V%02u : ", varNum);
fgEntryPtr->Dump();
}
#endif // DEBUG
#ifndef UNIX_AMD64_ABI
// This local variable must match the layout of the 'objClass' type exactly
if (varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
// We have a HFA struct.
noway_assert(elemType == varDsc->GetHfaType());
noway_assert(elemSize == genTypeSize(elemType));
noway_assert(elemCount == (varDsc->lvExactSize / elemSize));
noway_assert(elemSize * elemCount == varDsc->lvExactSize);
for (unsigned inx = 0; (inx < elemCount); inx++)
{
noway_assert(type[inx] == elemType);
}
}
else
{
#if defined(TARGET_ARM64)
// We must have a 16-byte struct (non-HFA)
noway_assert(elemCount == 2);
#elif defined(TARGET_ARM)
noway_assert(elemCount <= 4);
#endif
for (unsigned inx = 0; inx < elemCount; inx++)
{
var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx);
// We setup the type[inx] value above using the GC info from 'objClass'
// This GT_LCL_VAR must have the same GC layout info
//
if (varTypeIsGC(currentGcLayoutType))
{
noway_assert(type[inx] == currentGcLayoutType);
}
else
{
// We may have used a small type when we set up the type[inx] values above.
// We can safely widen this to TYP_I_IMPL.
type[inx] = TYP_I_IMPL;
}
}
}
if (varDsc->lvPromoted && varDsc->lvIsHfa() && fgEntryPtr->isPassedInFloatRegisters())
{
bool canMorphToFieldList = true;
for (unsigned fldOffset = 0; fldOffset < varDsc->lvExactSize; fldOffset += elemSize)
{
const unsigned fldVarNum = lvaGetFieldLocal(varDsc, fldOffset);
if ((fldVarNum == BAD_VAR_NUM) || !varTypeUsesFloatReg(lvaGetDesc(fldVarNum)))
{
canMorphToFieldList = false;
break;
}
}
if (canMorphToFieldList)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
else
#endif // !UNIX_AMD64_ABI
#if defined(TARGET_ARM64) || defined(UNIX_AMD64_ABI) || defined(TARGET_LOONGARCH64)
// Is this LclVar a promoted struct with exactly 2 fields?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == 2) && !varDsc->lvIsHfa())
{
// See if we have two promoted fields that start at offsets 0 and TARGET_POINTER_SIZE?
unsigned loVarNum = lvaGetFieldLocal(varDsc, 0);
unsigned hiVarNum = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE);
// Did we find the promoted fields at the necessary offsets?
if ((loVarNum != BAD_VAR_NUM) && (hiVarNum != BAD_VAR_NUM))
{
LclVarDsc* loVarDsc = lvaGetDesc(loVarNum);
LclVarDsc* hiVarDsc = lvaGetDesc(hiVarNum);
var_types loType = loVarDsc->lvType;
var_types hiType = hiVarDsc->lvType;
if ((varTypeIsFloating(loType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(0))) ||
(varTypeIsFloating(hiType) != genIsValidFloatReg(fgEntryPtr->GetRegNum(1))))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the integer
// registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be enregistered)
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
}
else
{
// We can use the struct promoted field as the two arguments
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLVAR-LO, FIELD_LIST(LCLVAR-HI, nullptr))
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
newArg->AddField(this, gtNewLclvNode(loVarNum, loType), 0, loType);
newArg->AddField(this, gtNewLclvNode(hiVarNum, hiType), TARGET_POINTER_SIZE, hiType);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLD nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#elif defined(TARGET_ARM)
// Is this LclVar a promoted struct whose field count matches the element count?
if (varDsc->lvPromoted && (varDsc->lvFieldCnt == elemCount) && !varDsc->lvIsHfa())
{
// See if we have promoted fields?
unsigned varNums[4];
bool hasBadVarNum = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varNums[inx] = lvaGetFieldLocal(varDsc, TARGET_POINTER_SIZE * inx);
if (varNums[inx] == BAD_VAR_NUM)
{
hasBadVarNum = true;
break;
}
}
// Did we find the promoted fields at the necessary offsets?
if (!hasBadVarNum)
{
LclVarDsc* varDscs[4];
var_types varType[4];
bool varIsFloat = false;
for (unsigned inx = 0; inx < elemCount; inx++)
{
varDscs[inx] = lvaGetDesc(varNums[inx]);
varType[inx] = varDscs[inx]->lvType;
if (varTypeIsFloating(varType[inx]))
{
// TODO-LSRA - It currently doesn't support the passing of floating point LCL_VARS in the
// integer registers. So for now we will use GT_LCLFLD's to pass this struct (it won't be
// enregistered)
//
JITDUMP("Multireg struct V%02u will be passed using GT_LCLFLD because it has float fields.\n",
varNum);
//
// we call lvaSetVarDoNotEnregister and do the proper transformation below.
//
varIsFloat = true;
break;
}
}
if (!varIsFloat)
{
newArg = fgMorphLclArgToFieldlist(varNode);
}
}
}
else
{
//
// We will create a list of GT_LCL_FLD nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
}
#endif // TARGET_ARM
}
// If we didn't set newArg to a new GT_FIELD_LIST tree
//
if (newArg == nullptr)
{
if (fgEntryPtr->GetRegNum() == REG_STK)
{
// We leave this stack passed argument alone
return arg;
}
// Are we passing a GT_LCL_FLD (or a GT_LCL_VAR that was not struct promoted)?
// A GT_LCL_FLD could also contain a 16-byte struct or an HFA struct inside it.
//
if ((argValue->OperGet() == GT_LCL_FLD) || (argValue->OperGet() == GT_LCL_VAR))
{
GenTreeLclVarCommon* varNode = argValue->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(varNum);
unsigned baseOffset = varNode->GetLclOffs();
unsigned lastOffset = baseOffset + structSize;
// The allocated size of our LocalVar must be at least as big as lastOffset
assert(varDsc->lvSize() >= lastOffset);
if (varDsc->HasGCPtr())
{
// alignment of the baseOffset is required
noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0);
#ifndef UNIX_AMD64_ABI
noway_assert(elemSize == TARGET_POINTER_SIZE);
#endif
unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
ClassLayout* layout = varDsc->GetLayout();
for (unsigned inx = 0; (inx < elemCount); inx++)
{
// The GC information must match what we setup using 'objClass'
if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx]))
{
noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx));
}
}
}
else // this varDsc contains no GC pointers
{
for (unsigned inx = 0; inx < elemCount; inx++)
{
// The GC information must match what we setup using 'objClass'
noway_assert(!varTypeIsGC(type[inx]));
}
}
//
// We create a list of GT_LCL_FLD nodes to pass this struct
//
lvaSetVarDoNotEnregister(varNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
// Create a new tree for 'arg'
// replace the existing LDOBJ(ADDR(LCLVAR))
// with a FIELD_LIST(LCLFLD-LO, LCLFLD-HI)
//
unsigned offset = baseOffset;
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* nextLclFld = gtNewLclFldNode(varNum, type[inx], offset);
newArg->AddField(this, nextLclFld, offset, type[inx]);
#ifdef TARGET_LOONGARCH64
if (structSize > TARGET_POINTER_SIZE)
{
// For LoongArch64's ABI, there may be padding,
// e.g. `struct {float a; long b;}`
offset += TARGET_POINTER_SIZE;
}
else
#endif
{
offset += genTypeSize(type[inx]);
}
}
}
// Are we passing a GT_OBJ struct?
//
else if (argValue->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = argValue->AsObj();
GenTree* baseAddr = argObj->gtOp1;
var_types addrType = baseAddr->TypeGet();
if (baseAddr->OperGet() == GT_ADDR)
{
GenTree* addrTaken = baseAddr->AsOp()->gtOp1;
if (addrTaken->IsLocal())
{
GenTreeLclVarCommon* varNode = addrTaken->AsLclVarCommon();
unsigned varNum = varNode->GetLclNum();
// We access a non-struct type (for example, long) as a struct type.
// Make sure the lclVar lives on the stack so that its fields are accessible by address.
lvaSetVarDoNotEnregister(varNum DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
// Create a new tree for 'arg'
// replace the existing LDOBJ(EXPR)
// with a FIELD_LIST(IND(EXPR), FIELD_LIST(IND(EXPR+8), nullptr) ...)
//
newArg = new (this, GT_FIELD_LIST) GenTreeFieldList();
unsigned offset = 0;
for (unsigned inx = 0; inx < elemCount; inx++)
{
GenTree* curAddr = baseAddr;
if (offset != 0)
{
GenTree* baseAddrDup = gtCloneExpr(baseAddr);
noway_assert(baseAddrDup != nullptr);
curAddr = gtNewOperNode(GT_ADD, addrType, baseAddrDup, gtNewIconNode(offset, TYP_I_IMPL));
}
else
{
curAddr = baseAddr;
}
GenTree* curItem = gtNewIndir(type[inx], curAddr);
// For safety all GT_IND should have at least GT_GLOB_REF set.
curItem->gtFlags |= GTF_GLOB_REF;
newArg->AddField(this, curItem, offset, type[inx]);
#ifdef TARGET_LOONGARCH64
if (structSize > TARGET_POINTER_SIZE)
{
// For LoongArch64's ABI, there may be padding,
// e.g. `struct {float a; long b;}`
offset += TARGET_POINTER_SIZE;
}
else
#endif
{
offset += genTypeSize(type[inx]);
}
}
}
}
#ifdef DEBUG
// If we reach here we should have set newArg to something
if (newArg == nullptr)
{
gtDispTree(argValue);
assert(!"Missing case in fgMorphMultiregStructArg");
}
#endif
noway_assert(newArg != nullptr);
#ifdef DEBUG
if (verbose)
{
printf("fgMorphMultiregStructArg created tree:\n");
gtDispTree(newArg);
}
#endif
arg = newArg; // consider calling fgMorphTree(newArg);
#endif // FEATURE_MULTIREG_ARGS
return arg;
}
//------------------------------------------------------------------------
// fgMorphLclArgToFieldlist: Morph a GT_LCL_VAR node to a GT_FIELD_LIST of its promoted fields
//
// Arguments:
// lcl - The GT_LCL_VAR node we will transform
//
// Return value:
// The new GT_FIELD_LIST that we have created.
//
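// For illustration: a promoted struct local V03 with fields V04 (offset 0) and V05 (offset 8)
// would become FIELD_LIST { LCL_VAR V04 at offset 0, LCL_VAR V05 at offset 8 } (the field
// numbers and offsets here are hypothetical).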
GenTreeFieldList* Compiler::fgMorphLclArgToFieldlist(GenTreeLclVarCommon* lcl)
{
LclVarDsc* varDsc = lvaGetDesc(lcl);
assert(varDsc->lvPromoted);
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclNum = varDsc->lvFieldLclStart;
GenTreeFieldList* fieldList = new (this, GT_FIELD_LIST) GenTreeFieldList();
for (unsigned i = 0; i < fieldCount; i++)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
GenTree* lclVar = gtNewLclvNode(fieldLclNum, fieldVarDsc->TypeGet());
fieldList->AddField(this, lclVar, fieldVarDsc->lvFldOffset, fieldVarDsc->TypeGet());
fieldLclNum++;
}
return fieldList;
}
//------------------------------------------------------------------------
// fgMakeOutgoingStructArgCopy: make a copy of a struct variable if necessary,
// to pass to a callee.
//
// Arguments:
// call - call being processed
// args - args for the call
// copyBlkClass - class handle for the struct
//
// The arg is updated if necessary with the copy.
//
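// Conceptually (a rough sketch): CALL(..., structLcl, ...) becomes something like
// "tmp = structLcl; CALL(..., tmp, ...)" - with the copy emitted as a separate early node when
// FEATURE_FIXED_OUT_ARGS is set, or folded into a "(tmp = val), tmp" COMMA otherwise.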
void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, GenTreeCall::Use* args, CORINFO_CLASS_HANDLE copyBlkClass)
{
GenTree* argx = args->GetNode();
noway_assert(argx->gtOper != GT_MKREFANY);
fgArgTabEntry* argEntry = Compiler::gtArgEntryByNode(call, argx);
// If we're optimizing, see if we can avoid making a copy.
//
// We don't need a copy if this is the last use of an implicit by-ref local.
//
if (opts.OptimizationEnabled())
{
GenTreeLclVar* const lcl = argx->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
const unsigned varNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(varNum);
const unsigned short totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
// We don't have liveness so we rely on other indications of last use.
//
// We handle these cases:
//
// * (must not copy) If the call is a tail call, the use is a last use.
// We must skip the copy if we have a fast tail call.
//
// * (may not copy) if the call is noreturn, the use is a last use.
// We also check for just one reference here as we are not doing
// alias analysis of the call's parameters, or checking if the call
// site is not within some try region.
//
// * (may not copy) if there is exactly one use of the local in the method,
// and the call is not in loop, this is a last use.
//
// fgMightHaveLoop() is expensive; check it last, only if necessary.
//
if (call->IsTailCall() || //
((totalAppearances == 1) && call->IsNoReturn()) || //
((totalAppearances == 1) && !fgMightHaveLoop()))
{
args->SetNode(lcl);
assert(argEntry->GetNode() == lcl);
JITDUMP("did not need to make outgoing copy for last use of implicit byref V%2d\n", varNum);
return;
}
}
}
JITDUMP("making an outgoing copy for struct arg\n");
if (fgOutgoingArgTemps == nullptr)
{
fgOutgoingArgTemps = hashBv::Create(this);
}
unsigned tmp = 0;
bool found = false;
// Attempt to find a local we have already used for an outgoing struct and reuse it.
// We do not reuse within a statement.
if (!opts.MinOpts())
{
indexType lclNum;
FOREACH_HBV_BIT_SET(lclNum, fgOutgoingArgTemps)
{
LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum);
if (typeInfo::AreEquivalent(varDsc->lvVerTypeInfo, typeInfo(TI_STRUCT, copyBlkClass)) &&
!fgCurrentlyInUseArgTemps->testBit(lclNum))
{
tmp = (unsigned)lclNum;
found = true;
JITDUMP("reusing outgoing struct arg");
break;
}
}
NEXT_HBV_BIT_SET;
}
// Create the CopyBlk tree and insert it.
if (!found)
{
// Get a new temp
// Here we don't need the unsafe value class check, since the addr of this temp is used only in copyblk.
tmp = lvaGrabTemp(true DEBUGARG("by-value struct argument"));
lvaSetStruct(tmp, copyBlkClass, false);
if (call->IsVarargs())
{
lvaSetStructUsedAsVarArg(tmp);
}
fgOutgoingArgTemps->setBit(tmp);
}
fgCurrentlyInUseArgTemps->setBit(tmp);
// TYP_SIMD structs should not be enregistered, since the ABI requires them to be
// allocated on the stack and their address to be passed.
if (lclVarIsSIMDType(tmp))
{
// TODO: check if we need this block here or other parts already deal with it.
lvaSetVarDoNotEnregister(tmp DEBUGARG(DoNotEnregisterReason::IsStructArg));
}
// Create a reference to the temp
GenTree* dest = gtNewLclvNode(tmp, lvaTable[tmp].lvType);
dest->gtFlags |= (GTF_DONT_CSE | GTF_VAR_DEF); // This is a def of the local, "entire" by construction.
// Copy the valuetype to the temp
GenTree* copyBlk = gtNewBlkOpNode(dest, argx, false /* not volatile */, true /* copyBlock */);
copyBlk = fgMorphCopyBlock(copyBlk);
#if FEATURE_FIXED_OUT_ARGS
// Do the copy early, and evaluate the temp later (see EvalArgsToTemps)
// When on Unix create LCL_FLD for structs passed in more than one registers. See fgMakeTmpArgNode
GenTree* arg = copyBlk;
#else // FEATURE_FIXED_OUT_ARGS
// Structs are always on the stack, and thus never need temps
// so we have to put the copy and temp all into one expression.
argEntry->tmpNum = tmp;
GenTree* arg = fgMakeTmpArgNode(argEntry);
// Change the expression to "(tmp=val),tmp"
arg = gtNewOperNode(GT_COMMA, arg->TypeGet(), copyBlk, arg);
#endif // FEATURE_FIXED_OUT_ARGS
args->SetNode(arg);
call->fgArgInfo->EvalToTmp(argEntry, tmp, arg);
}
#ifdef TARGET_ARM
// See declaration for specification comment.
void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
unsigned firstArgRegNum,
regMaskTP* pArgSkippedRegMask)
{
assert(varDsc->lvPromoted);
// There's no way to do these calculations without breaking abstraction and assuming that
// integer register arguments are consecutive ints. They are on ARM.
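// For example (hypothetical): with firstArgRegNum == REG_R0, a promoted struct whose first
// field occupies offsets 0..3 and whose next field starts at offset 8 leaves r1 unused, so r1
// is recorded in *pArgSkippedRegMask.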
// To start, figure out what register contains the last byte of the first argument.
LclVarDsc* firstFldVarDsc = lvaGetDesc(varDsc->lvFieldLclStart);
unsigned lastFldRegOfLastByte =
(firstFldVarDsc->lvFldOffset + firstFldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
// Now we're keeping track of the register that the last field ended in; see what registers
// subsequent fields start in, and whether any are skipped.
// (We assume here the invariant that the fields are sorted in offset order.)
for (unsigned fldVarOffset = 1; fldVarOffset < varDsc->lvFieldCnt; fldVarOffset++)
{
unsigned fldVarNum = varDsc->lvFieldLclStart + fldVarOffset;
LclVarDsc* fldVarDsc = lvaGetDesc(fldVarNum);
unsigned fldRegOffset = fldVarDsc->lvFldOffset / TARGET_POINTER_SIZE;
assert(fldRegOffset >= lastFldRegOfLastByte); // Assuming sorted fields.
// This loop should enumerate the offsets of any registers skipped:
// find what reg contains the last byte of the previous field, start at the first register
// after that, and mark every register before the first reg of the current field as skipped.
for (unsigned skippedRegOffsets = lastFldRegOfLastByte + 1; skippedRegOffsets < fldRegOffset;
skippedRegOffsets++)
{
// If the register number would not be an arg reg, we're done.
if (firstArgRegNum + skippedRegOffsets >= MAX_REG_ARG)
return;
*pArgSkippedRegMask |= genRegMask(regNumber(firstArgRegNum + skippedRegOffsets));
}
lastFldRegOfLastByte = (fldVarDsc->lvFldOffset + fldVarDsc->lvExactSize - 1) / TARGET_POINTER_SIZE;
}
}
#endif // TARGET_ARM
/*****************************************************************************
*
* A little helper used to rearrange nested commutative operations. The
* effect is that nested associative, commutative operations are transformed
* into a 'left-deep' tree, i.e. into something like this:
*
* (((a op b) op c) op d) op...
*/
#if REARRANGE_ADDS
void Compiler::fgMoveOpsLeft(GenTree* tree)
{
GenTree* op1;
GenTree* op2;
genTreeOps oper;
do
{
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
oper = tree->OperGet();
noway_assert(GenTree::OperIsCommutative(oper));
noway_assert(oper == GT_ADD || oper == GT_XOR || oper == GT_OR || oper == GT_AND || oper == GT_MUL);
noway_assert(!varTypeIsFloating(tree->TypeGet()) || !opts.genFPorder);
noway_assert(oper == op2->gtOper);
// Commutativity doesn't hold if overflow checks are needed
if (tree->gtOverflowEx() || op2->gtOverflowEx())
{
return;
}
if (gtIsActiveCSE_Candidate(op2))
{
// If we have marked op2 as a CSE candidate,
// we can't perform a commutative reordering
// because any value numbers that we computed for op2
// will be incorrect after performing a commutative reordering
//
return;
}
if (oper == GT_MUL && (op2->gtFlags & GTF_MUL_64RSLT))
{
return;
}
// Check for GTF_ADDRMODE_NO_CSE flag on add/mul Binary Operators
if (((oper == GT_ADD) || (oper == GT_MUL)) && ((tree->gtFlags & GTF_ADDRMODE_NO_CSE) != 0))
{
return;
}
if ((tree->gtFlags | op2->gtFlags) & GTF_BOOLEAN)
{
// We could deal with this, but we were always broken and just hit the assert
// below regarding flags, which means it's not frequent, so we will just bail out.
// See #195514
return;
}
noway_assert(!tree->gtOverflowEx() && !op2->gtOverflowEx());
GenTree* ad1 = op2->AsOp()->gtOp1;
GenTree* ad2 = op2->AsOp()->gtOp2;
// Compiler::optOptimizeBools() can create a GT_OR of two GC pointers yielding a GT_INT.
// We cannot reorder such GT_OR trees.
//
if (varTypeIsGC(ad1->TypeGet()) != varTypeIsGC(op2->TypeGet()))
{
break;
}
// Don't split up a byref calculation and create a new byref. E.g.,
// [byref]+ (ref, [int]+ (int, int)) => [byref]+ ([byref]+ (ref, int), int).
// Doing this transformation could create a situation where the first
// addition (that is, [byref]+ (ref, int) ) creates a byref pointer that
// no longer points within the ref object. If a GC happens, the byref won't
// get updated. This can happen, for instance, if one of the int components
// is negative. It also requires the address generation be in a fully-interruptible
// code region.
//
if (varTypeIsGC(op1->TypeGet()) && op2->TypeGet() == TYP_I_IMPL)
{
assert(varTypeIsGC(tree->TypeGet()) && (oper == GT_ADD));
break;
}
/* Change "(x op (y op z))" to "(x op y) op z" */
/* ie. "(op1 op (ad1 op ad2))" to "(op1 op ad1) op ad2" */
GenTree* new_op1 = op2;
new_op1->AsOp()->gtOp1 = op1;
new_op1->AsOp()->gtOp2 = ad1;
/* Change the flags. */
// Make sure we aren't throwing away any flags
noway_assert((new_op1->gtFlags &
~(GTF_MAKE_CSE | GTF_DONT_CSE | // It is ok that new_op1->gtFlags contains GTF_DONT_CSE flag.
GTF_REVERSE_OPS | // The reverse ops flag also can be set, it will be re-calculated
GTF_NODE_MASK | GTF_ALL_EFFECT | GTF_UNSIGNED)) == 0);
new_op1->gtFlags =
(new_op1->gtFlags & (GTF_NODE_MASK | GTF_DONT_CSE)) | // Make sure we propagate GTF_DONT_CSE flag.
(op1->gtFlags & GTF_ALL_EFFECT) | (ad1->gtFlags & GTF_ALL_EFFECT);
/* Retype new_op1 depending on whether or not it has become a GC ptr. */
if (varTypeIsGC(op1->TypeGet()))
{
noway_assert((varTypeIsGC(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_ADD) || // byref(ref + (int+int))
(varTypeIsI(tree->TypeGet()) && op2->TypeGet() == TYP_I_IMPL &&
oper == GT_OR)); // int(gcref | int(gcref|intval))
new_op1->gtType = tree->gtType;
}
else if (varTypeIsGC(ad2->TypeGet()))
{
// Neither ad1 nor op1 is GC, so new_op1 isn't either.
noway_assert(op1->gtType == TYP_I_IMPL && ad1->gtType == TYP_I_IMPL);
new_op1->gtType = TYP_I_IMPL;
}
// If new_op1 is a new expression, assign it a new unique value number.
// vnStore is null before the ValueNumber phase has run.
if (vnStore != nullptr)
{
// We can only keep the old value number on new_op1 if both op1 and ad2
// have the same non-NoVN value numbers. Since op is commutative, comparing
// only ad2 and op1 is enough.
if ((op1->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() == ValueNumStore::NoVN) ||
(ad2->gtVNPair.GetLiberal() != op1->gtVNPair.GetLiberal()))
{
new_op1->gtVNPair.SetBoth(vnStore->VNForExpr(nullptr, new_op1->TypeGet()));
}
}
tree->AsOp()->gtOp1 = new_op1;
tree->AsOp()->gtOp2 = ad2;
/* If 'new_op1' is now the same nested op, process it recursively */
if ((ad1->gtOper == oper) && !ad1->gtOverflowEx())
{
fgMoveOpsLeft(new_op1);
}
/* If 'ad2' is now the same nested op, process it
* Instead of recursion, we set up op1 and op2 for the next loop.
*/
op1 = new_op1;
op2 = ad2;
} while ((op2->gtOper == oper) && !op2->gtOverflowEx());
return;
}
#endif
/*****************************************************************************/
void Compiler::fgSetRngChkTarget(GenTree* tree, bool delay)
{
if (tree->OperIs(GT_BOUNDS_CHECK))
{
GenTreeBoundsChk* const boundsChk = tree->AsBoundsChk();
BasicBlock* const failBlock = fgSetRngChkTargetInner(boundsChk->gtThrowKind, delay);
if (failBlock != nullptr)
{
boundsChk->gtIndRngFailBB = failBlock;
}
}
else if (tree->OperIs(GT_INDEX_ADDR))
{
GenTreeIndexAddr* const indexAddr = tree->AsIndexAddr();
BasicBlock* const failBlock = fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
if (failBlock != nullptr)
{
indexAddr->gtIndRngFailBB = failBlock;
}
}
else
{
noway_assert(tree->OperIs(GT_ARR_ELEM, GT_ARR_INDEX));
fgSetRngChkTargetInner(SCK_RNGCHK_FAIL, delay);
}
}
BasicBlock* Compiler::fgSetRngChkTargetInner(SpecialCodeKind kind, bool delay)
{
if (opts.MinOpts())
{
delay = false;
}
if (!opts.compDbgCode)
{
if (!delay && !compIsForInlining())
{
// Create/find the appropriate "range-fail" label
return fgRngChkTarget(compCurBB, kind);
}
}
return nullptr;
}
/*****************************************************************************
*
* Expand a GT_INDEX node and fully morph the child operands
*
* The original GT_INDEX node is bashed into the GT_IND node that accesses
* the array element. We expand the GT_INDEX node into a larger tree that
* evaluates the array base and index. The simplest expansion is a GT_COMMA
* with a GT_BOUNDS_CHECK and a GT_IND with a GTF_INX_RNGCHK flag.
* For complex array or index expressions one or more GT_COMMA assignments
* are inserted so that we only evaluate the array or index expressions once.
*
* The fully expanded tree is then morphed. This causes gtFoldExpr to
* perform local constant prop and reorder the constants in the tree and
* fold them.
*
* We then parse the resulting array element expression in order to locate
* and label the constants and variables that occur in the tree.
*/
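// Rough sketch of the optimized expansion for `a[i]` on an int[] (temps elided):
//   COMMA(BOUNDS_CHECK(i, ARR_LENGTH(a)),
//         IND(ARR_ADDR(ADD(a, ADD(MUL(i, 4), firstElementOffset)))))
// The exact ADD shape differs on Arm (see groupArrayRefWithElemOffset below).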
const int MAX_ARR_COMPLEXITY = 4;
const int MAX_INDEX_COMPLEXITY = 4;
GenTree* Compiler::fgMorphArrayIndex(GenTree* tree)
{
noway_assert(tree->gtOper == GT_INDEX);
GenTreeIndex* asIndex = tree->AsIndex();
var_types elemTyp = asIndex->TypeGet();
unsigned elemSize = asIndex->gtIndElemSize;
CORINFO_CLASS_HANDLE elemStructType = asIndex->gtStructElemClass;
noway_assert(elemTyp != TYP_STRUCT || elemStructType != NO_CLASS_HANDLE);
// Fold "cns_str"[cns_index] to ushort constant
// NOTE: don't do it for empty string, the operation will fail anyway
if (opts.OptimizationEnabled() && asIndex->Arr()->OperIs(GT_CNS_STR) &&
!asIndex->Arr()->AsStrCon()->IsStringEmptyField() && asIndex->Index()->IsIntCnsFitsInI32())
{
const int cnsIndex = static_cast<int>(asIndex->Index()->AsIntConCommon()->IconValue());
if (cnsIndex >= 0)
{
int length;
const char16_t* str = info.compCompHnd->getStringLiteral(asIndex->Arr()->AsStrCon()->gtScpHnd,
asIndex->Arr()->AsStrCon()->gtSconCPX, &length);
if ((cnsIndex < length) && (str != nullptr))
{
GenTree* cnsCharNode = gtNewIconNode(str[cnsIndex], TYP_INT);
INDEBUG(cnsCharNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return cnsCharNode;
}
}
}
#ifdef FEATURE_SIMD
if (varTypeIsStruct(elemTyp) && structSizeMightRepresentSIMDType(elemSize))
{
// If this is a SIMD type, this is the point at which we lose the type information,
// so we need to set the correct type on the GT_IND.
// (We don't care about the base type here, so we only check, but don't retain, the return value).
unsigned simdElemSize = 0;
if (getBaseJitTypeAndSizeOfSIMDType(elemStructType, &simdElemSize) != CORINFO_TYPE_UNDEF)
{
assert(simdElemSize == elemSize);
elemTyp = getSIMDTypeForSize(elemSize);
// This is the new type of the node.
tree->gtType = elemTyp;
// Now set elemStructType to null so that we don't confuse value numbering.
elemStructType = NO_CLASS_HANDLE;
}
}
#endif // FEATURE_SIMD
// Set up the array length's offset into lenOffs
// And the first element's offset into elemOffs
ssize_t lenOffs;
uint8_t elemOffs;
if (tree->gtFlags & GTF_INX_STRING_LAYOUT)
{
lenOffs = OFFSETOF__CORINFO_String__stringLen;
elemOffs = OFFSETOF__CORINFO_String__chars;
tree->gtFlags &= ~GTF_INX_STRING_LAYOUT; // Clear this flag as it is used for GTF_IND_VOLATILE
}
else
{
// We have a standard array
lenOffs = OFFSETOF__CORINFO_Array__length;
elemOffs = OFFSETOF__CORINFO_Array__data;
}
// In minopts, we expand GT_INDEX to GT_IND(GT_INDEX_ADDR) in order to minimize the size of the IR. As minopts
// compilation time is roughly proportional to the size of the IR, this helps keep compilation times down.
// Furthermore, this representation typically saves on code size in minopts w.r.t. the complete expansion
// performed when optimizing, as it does not require LclVar nodes (which are always stack loads/stores in
// minopts).
//
// When we *are* optimizing, we fully expand GT_INDEX to:
// 1. Evaluate the array address expression and store the result in a temp if the expression is complex or
// side-effecting.
// 2. Evaluate the array index expression and store the result in a temp if the expression is complex or
// side-effecting.
// 3. Perform an explicit bounds check: GT_BOUNDS_CHECK(index, GT_ARR_LENGTH(array))
// 4. Compute the address of the element that will be accessed:
// GT_ADD(GT_ADD(array, firstElementOffset), GT_MUL(index, elementSize)) OR
// GT_ADD(array, GT_ADD(GT_MUL(index, elementSize), firstElementOffset))
// 5. Wrap the address in a GT_ARR_ADDR (the information saved there will later be used by VN).
// 6. Dereference the address with a GT_IND.
//
// This expansion explicitly exposes the bounds check and the address calculation to the optimizer, which allows
// for more straightforward bounds-check removal, CSE, etc.
if (opts.MinOpts())
{
GenTree* const array = fgMorphTree(asIndex->Arr());
GenTree* const index = fgMorphTree(asIndex->Index());
GenTreeIndexAddr* const indexAddr = new (this, GT_INDEX_ADDR)
GenTreeIndexAddr(array, index, elemTyp, elemStructType, elemSize, static_cast<unsigned>(lenOffs), elemOffs);
indexAddr->gtFlags |= (array->gtFlags | index->gtFlags) & GTF_ALL_EFFECT;
// Mark the indirection node as needing a range check if necessary.
// Note this will always be true unless JitSkipArrayBoundCheck() is used
if ((indexAddr->gtFlags & GTF_INX_RNGCHK) != 0)
{
fgSetRngChkTarget(indexAddr);
}
if (!tree->TypeIs(TYP_STRUCT))
{
tree->ChangeOper(GT_IND);
}
else
{
DEBUG_DESTROY_NODE(tree);
tree = gtNewObjNode(elemStructType, indexAddr);
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
}
GenTreeIndir* const indir = tree->AsIndir();
indir->Addr() = indexAddr;
bool canCSE = indir->CanCSE();
indir->gtFlags = indexAddr->gtFlags & GTF_ALL_EFFECT;
if (!canCSE)
{
indir->SetDoNotCSE();
}
INDEBUG(indexAddr->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return indir;
}
GenTree* arrRef = asIndex->Arr();
GenTree* index = asIndex->Index();
bool chkd = ((tree->gtFlags & GTF_INX_RNGCHK) != 0); // if false, range checking will be disabled
bool indexNonFaulting = ((tree->gtFlags & GTF_INX_NOFAULT) != 0); // if true, mark GTF_IND_NONFAULTING
bool nCSE = ((tree->gtFlags & GTF_DONT_CSE) != 0);
GenTree* arrRefDefn = nullptr; // non-NULL if we need to allocate a temp for the arrRef expression
GenTree* indexDefn = nullptr; // non-NULL if we need to allocate a temp for the index expression
GenTree* bndsChk = nullptr;
// If we're doing range checking, introduce a GT_BOUNDS_CHECK node for the address.
if (chkd)
{
GenTree* arrRef2 = nullptr; // The second copy will be used in array address expression
GenTree* index2 = nullptr;
// If the arrRef or index expressions involves an assignment, a call, or reads from global memory,
// then we *must* allocate a temporary in which to "localize" those values, to ensure that the
// same values are used in the bounds check and the actual dereference.
// Also we allocate the temporary when the expression is sufficiently complex/expensive.
//
// Note that if the expression is a GT_FIELD, it has not yet been morphed so its true complexity is
// not exposed. Without that condition there are cases of local struct fields that were previously,
// needlessly, marked as GTF_GLOB_REF, and when that was fixed, there were some regressions that
// were mostly ameliorated by adding this condition.
//
// Likewise, allocate a temporary if the expression is a GT_LCL_FLD node. These used to be created
// after fgMorphArrayIndex from GT_FIELD trees so this preserves the existing behavior. This is
// perhaps a decision that should be left to CSE but FX diffs show that it is slightly better to
// do this here.
if ((arrRef->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) ||
gtComplexityExceeds(&arrRef, MAX_ARR_COMPLEXITY) || arrRef->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned arrRefTmpNum = lvaGrabTemp(true DEBUGARG("arr expr"));
arrRefDefn = gtNewTempAssign(arrRefTmpNum, arrRef);
arrRef = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
arrRef2 = gtNewLclvNode(arrRefTmpNum, arrRef->TypeGet());
}
else
{
arrRef2 = gtCloneExpr(arrRef);
noway_assert(arrRef2 != nullptr);
}
if ((index->gtFlags & (GTF_ASG | GTF_CALL | GTF_GLOB_REF)) || gtComplexityExceeds(&index, MAX_ARR_COMPLEXITY) ||
index->OperIs(GT_FIELD, GT_LCL_FLD))
{
unsigned indexTmpNum = lvaGrabTemp(true DEBUGARG("index expr"));
indexDefn = gtNewTempAssign(indexTmpNum, index);
index = gtNewLclvNode(indexTmpNum, index->TypeGet());
index2 = gtNewLclvNode(indexTmpNum, index->TypeGet());
}
else
{
index2 = gtCloneExpr(index);
noway_assert(index2 != nullptr);
}
// Next introduce a GT_BOUNDS_CHECK node
var_types bndsChkType = TYP_INT; // By default, try to use 32-bit comparison for array bounds check.
#ifdef TARGET_64BIT
// The CLI Spec allows an array to be indexed by either an int32 or a native int. In the case
// of a 64 bit architecture this means the array index can potentially be a TYP_LONG, so for this case,
// the comparison will have to be widened to 64 bits.
if (index->TypeGet() == TYP_I_IMPL)
{
bndsChkType = TYP_I_IMPL;
}
#endif // TARGET_64BIT
GenTree* arrLen = gtNewArrLen(TYP_INT, arrRef, (int)lenOffs, compCurBB);
if (bndsChkType != TYP_INT)
{
arrLen = gtNewCastNode(bndsChkType, arrLen, true, bndsChkType);
}
GenTreeBoundsChk* arrBndsChk = new (this, GT_BOUNDS_CHECK) GenTreeBoundsChk(index, arrLen, SCK_RNGCHK_FAIL);
arrBndsChk->gtInxType = elemTyp;
bndsChk = arrBndsChk;
// Now we'll switch to using the second copies for arrRef and index
// to compute the address expression
arrRef = arrRef2;
index = index2;
}
// Create the "addr" which is "*(arrRef + ((index * elemSize) + elemOffs))"
GenTree* addr;
#ifdef TARGET_64BIT
// Widen 'index' on 64-bit targets
if (index->TypeGet() != TYP_I_IMPL)
{
if (index->OperGet() == GT_CNS_INT)
{
index->gtType = TYP_I_IMPL;
}
else
{
index = gtNewCastNode(TYP_I_IMPL, index, true, TYP_I_IMPL);
}
}
#endif // TARGET_64BIT
/* Scale the index value if necessary */
if (elemSize > 1)
{
GenTree* size = gtNewIconNode(elemSize, TYP_I_IMPL);
// Fix 392756 WP7 Crossgen
//
// During codegen optGetArrayRefScaleAndIndex() makes the assumption that op2 of a GT_MUL node
// is a constant and is not capable of handling CSE'ing the elemSize constant into a lclvar.
// Hence to prevent the constant from becoming a CSE we mark it as NO_CSE.
//
size->gtFlags |= GTF_DONT_CSE;
/* Multiply by the array element size */
addr = gtNewOperNode(GT_MUL, TYP_I_IMPL, index, size);
}
else
{
addr = index;
}
// Be careful to only create the byref pointer when the full index expression is added to the array reference.
// We don't want to create a partial byref address expression that doesn't include the full index offset:
// a byref must point within the containing object. It is dangerous (especially when optimizations come into
// play) to create a "partial" byref that doesn't point exactly to the correct object; there is risk that
// the partial byref will not point within the object, and thus not get updated correctly during a GC.
// This is mostly a risk in fully-interruptible code regions.
// We can generate two types of trees for "addr":
//
// 1) "arrRef + (index + elemOffset)"
// 2) "(arrRef + elemOffset) + index"
//
// XArch has powerful addressing modes such as [base + index*scale + offset] so it's fine with 1),
// while for Arm we better try to make an invariant sub-tree as large as possible, which is usually
// "(arrRef + elemOffset)" and is CSE/LoopHoisting friendly => produces better codegen.
// 2) should still be safe from GC's point of view since both ADD operations are byref and point to
// within the object so GC will be able to correctly track and update them.
bool groupArrayRefWithElemOffset = false;
#ifdef TARGET_ARMARCH
groupArrayRefWithElemOffset = true;
// TODO: in some cases even on ARM we better use 1) shape because if "index" is invariant and "arrRef" is not
// we at least will be able to hoist/CSE "index + elemOffset" in some cases.
// See https://github.com/dotnet/runtime/pull/61293#issuecomment-964146497
// Use the 2) form only for primitive types for now - it significantly reduced the number of size regressions
if (!varTypeIsIntegral(elemTyp) && !varTypeIsFloating(elemTyp))
{
groupArrayRefWithElemOffset = false;
}
#endif
// First element's offset
GenTree* elemOffset = gtNewIconNode(elemOffs, TYP_I_IMPL);
if (groupArrayRefWithElemOffset)
{
GenTree* basePlusOffset = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, basePlusOffset, addr);
}
else
{
addr = gtNewOperNode(GT_ADD, TYP_I_IMPL, addr, elemOffset);
addr = gtNewOperNode(GT_ADD, TYP_BYREF, arrRef, addr);
}
addr = new (this, GT_ARR_ADDR) GenTreeArrAddr(addr, elemTyp, elemStructType, elemOffs);
// Change the original GT_INDEX node into a GT_IND node
tree->SetOper(GT_IND);
// If the index node is a floating-point type, notify the compiler
// we'll potentially use floating point registers at the time of codegen.
if (varTypeUsesFloatReg(tree->gtType))
{
this->compFloatingPointUsed = true;
}
// We've now consumed the GTF_INX_RNGCHK and GTF_INX_NOFAULT, and the node
// is no longer a GT_INDEX node.
tree->gtFlags &= ~(GTF_INX_RNGCHK | GTF_INX_NOFAULT);
tree->AsOp()->gtOp1 = addr;
// If there's a bounds check, the indir won't fault.
if (bndsChk || indexNonFaulting)
{
tree->gtFlags |= GTF_IND_NONFAULTING;
addr->gtFlags |= GTF_ARR_ADDR_NONNULL;
}
else
{
tree->gtFlags |= GTF_EXCEPT;
}
if (nCSE)
{
tree->gtFlags |= GTF_DONT_CSE;
}
// Did we create a bndsChk tree?
if (bndsChk)
{
// Use a GT_COMMA node to prepend the array bound check
//
tree = gtNewOperNode(GT_COMMA, elemTyp, bndsChk, tree);
/* Mark the indirection node as needing a range check */
fgSetRngChkTarget(bndsChk);
}
if (indexDefn != nullptr)
{
// Use a GT_COMMA node to prepend the index assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), indexDefn, tree);
}
if (arrRefDefn != nullptr)
{
// Use a GT_COMMA node to prepend the arrRef assignment
//
tree = gtNewOperNode(GT_COMMA, tree->TypeGet(), arrRefDefn, tree);
}
JITDUMP("fgMorphArrayIndex (before remorph):\n")
DISPTREE(tree)
tree = fgMorphTree(tree);
JITDUMP("fgMorphArrayIndex (after remorph):\n")
DISPTREE(tree)
return tree;
}
#ifdef TARGET_X86
/*****************************************************************************
*
* Wrap fixed stack arguments for varargs functions to go through varargs
* cookie to access them, except for the cookie itself.
*
* Non-x86 platforms are allowed to access all arguments directly
* so we don't need this code.
*
*/
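// Roughly, the access is rewritten as IND(varType, SUB(lvaVarargsBaseOfStkArgs, cns)) (or an
// OBJ for struct types), where cns is derived from the argument's stack offset; see below.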
GenTree* Compiler::fgMorphStackArgForVarArgs(unsigned lclNum, var_types varType, unsigned lclOffs)
{
/* For the fixed stack arguments of a varargs function, we need to go
through the varargs cookies to access them, except for the
cookie itself */
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsParam && !varDsc->lvIsRegArg && lclNum != lvaVarargsHandleArg)
{
// Create a node representing the local pointing to the base of the args
GenTree* ptrArg =
gtNewOperNode(GT_SUB, TYP_I_IMPL, gtNewLclvNode(lvaVarargsBaseOfStkArgs, TYP_I_IMPL),
gtNewIconNode(varDsc->GetStackOffset() -
codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES - lclOffs));
// Access the argument through the local
GenTree* tree;
if (varTypeIsStruct(varType))
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
assert(typeHnd != nullptr);
tree = gtNewObjNode(typeHnd, ptrArg);
}
else
{
tree = gtNewOperNode(GT_IND, varType, ptrArg);
}
tree->gtFlags |= GTF_IND_TGTANYWHERE;
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
return fgMorphTree(tree);
}
return NULL;
}
#endif
/*****************************************************************************
*
* Transform the given GT_LCL_VAR tree for code generation.
*/
GenTree* Compiler::fgMorphLocalVar(GenTree* tree, bool forceRemorph)
{
assert(tree->gtOper == GT_LCL_VAR);
unsigned lclNum = tree->AsLclVarCommon()->GetLclNum();
var_types varType = lvaGetRealType(lclNum);
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
GenTree* newTree = fgMorphStackArgForVarArgs(lclNum, varType, 0);
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
/* If not during the global morphing phase bail */
if (!fgGlobalMorph && !forceRemorph)
{
return tree;
}
bool varAddr = (tree->gtFlags & GTF_DONT_CSE) != 0;
noway_assert(!(tree->gtFlags & GTF_VAR_DEF) || varAddr); // GTF_VAR_DEF should always imply varAddr
if (!varAddr && varDsc->lvNormalizeOnLoad())
{
// TYP_BOOL quirk: previously, the code in optAssertionIsSubrange did not handle TYP_BOOL.
// Now it does, but this leads to some regressions because we lose the uniform VNs for trees
// that represent the "reduced" normalize-on-load locals, i. e. LCL_VAR(small type V00), created
// here with local assertions, and "expanded", i. e. CAST(small type <- LCL_VAR(int V00)).
// This is a pretty fundamental problem with how normalize-on-load locals appear to the optimizer.
// This quirk preserves the previous behavior.
// TODO-CQ: fix the VNs for normalize-on-load locals and remove this quirk.
bool isBoolQuirk = varType == TYP_BOOL;
// Assertion prop can tell us to omit adding a cast here. This is
// useful when the local is a small-typed parameter that is passed in a
// register: in that case, the ABI specifies that the upper bits might
// be invalid, but the assertion guarantees us that we have normalized
// when we wrote it.
if (optLocalAssertionProp && !isBoolQuirk &&
optAssertionIsSubrange(tree, IntegralRange::ForType(varType), apFull) != NO_ASSERTION_INDEX)
{
// The previous assertion can guarantee us that if this node gets
// assigned a register, it will be normalized already. It is still
// possible that this node ends up being in memory, in which case
// normalization will still be needed, so we better have the right
// type.
assert(tree->TypeGet() == varDsc->TypeGet());
return tree;
}
// Small-typed arguments and aliased locals are normalized on load.
// Other small-typed locals are normalized on store.
// Locals are also normalized on load under the debugger, as the debugger could write to the variable.
// If this is one of the former, insert a narrowing cast on the load.
// ie. Convert: var-short --> cast-short(var-int)
tree->gtType = TYP_INT;
fgMorphTreeDone(tree);
tree = gtNewCastNode(TYP_INT, tree, false, varType);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
/*****************************************************************************
Grab a temp for big offset morphing.
This method will grab a new temp if no temp of this "type" has been created;
otherwise it will return the cached one.
*/
unsigned Compiler::fgGetBigOffsetMorphingTemp(var_types type)
{
unsigned lclNum = fgBigOffsetMorphingTemps[type];
if (lclNum == BAD_VAR_NUM)
{
// We haven't created a temp for this kind of type. Create one now.
lclNum = lvaGrabTemp(false DEBUGARG("Big Offset Morphing"));
fgBigOffsetMorphingTemps[type] = lclNum;
}
else
{
// We better get the right type.
noway_assert(lvaTable[lclNum].TypeGet() == type);
}
noway_assert(lclNum != BAD_VAR_NUM);
return lclNum;
}
/*****************************************************************************
*
* Transform the given GT_FIELD tree for code generation.
*/
GenTree* Compiler::fgMorphField(GenTree* tree, MorphAddrContext* mac)
{
assert(tree->gtOper == GT_FIELD);
CORINFO_FIELD_HANDLE symHnd = tree->AsField()->gtFldHnd;
unsigned fldOffset = tree->AsField()->gtFldOffset;
GenTree* objRef = tree->AsField()->GetFldObj();
bool objIsLocal = false;
FieldSeqNode* fieldSeq = FieldSeqStore::NotAField();
if (!tree->AsField()->gtFldMayOverlap)
{
if (objRef != nullptr)
{
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::Instance);
}
else
{
// Only simple statics get imported as GT_FIELDs.
fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, FieldSeqNode::FieldKind::SimpleStatic);
}
}
// Reset the flag because we may reuse the node.
tree->AsField()->gtFldMayOverlap = false;
if (fgGlobalMorph && (objRef != nullptr) && (objRef->gtOper == GT_ADDR))
{
// Make sure we've checked if 'objRef' is an address of an implicit-byref parameter.
// If it is, fgMorphImplicitByRefArgs may change it to a different opcode, which the
// simd field rewrites are sensitive to.
fgMorphImplicitByRefArgs(objRef);
}
noway_assert(((objRef != nullptr) && (objRef->IsLocalAddrExpr() != nullptr)) ||
((tree->gtFlags & GTF_GLOB_REF) != 0));
#ifdef FEATURE_SIMD
// If this field belongs to a SIMD struct, translate it to a SIMD intrinsic.
if (mac == nullptr)
{
if (IsBaselineSimdIsaSupported())
{
GenTree* newTree = fgMorphFieldToSimdGetElement(tree);
if (newTree != tree)
{
newTree = fgMorphTree(newTree);
return newTree;
}
}
}
else if ((objRef != nullptr) && (objRef->OperGet() == GT_ADDR) && varTypeIsSIMD(objRef->gtGetOp1()))
{
GenTreeLclVarCommon* lcl = objRef->IsLocalAddrExpr();
if (lcl != nullptr)
{
lvaSetVarDoNotEnregister(lcl->GetLclNum() DEBUGARG(DoNotEnregisterReason::LocalField));
}
}
#endif
// Create a default MorphAddrContext early so it doesn't go out of scope
// before it is used.
MorphAddrContext defMAC(MACK_Ind);
/* Is this an instance data member? */
if (objRef)
{
GenTree* addr;
objIsLocal = objRef->IsLocal();
if (tree->gtFlags & GTF_IND_TLS_REF)
{
NO_WAY("instance field can not be a TLS ref.");
}
/* We'll create the expression "*(objRef + mem_offs)" */
noway_assert(varTypeIsGC(objRef->TypeGet()) || objRef->TypeGet() == TYP_I_IMPL);
/*
Now we have a tree like this:
+--------------------+
| GT_FIELD | tree
+----------+---------+
|
+--------------+-------------+
|tree->AsField()->GetFldObj()|
+--------------+-------------+
We want to make it like this (when fldOffset is <= MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+---------+----------+
|
|
+---------+----------+
| GT_ADD | addr
+---------+----------+
|
/ \
/ \
/ \
+-------------------+ +----------------------+
| objRef | | fldOffset |
| | | (when fldOffset !=0) |
+-------------------+ +----------------------+
or this (when fldOffset is > MAX_UNCHECKED_OFFSET_FOR_NULL_OBJECT):
+--------------------+
| GT_IND/GT_OBJ | tree
+----------+---------+
|
+----------+---------+
| GT_COMMA | comma2
+----------+---------+
|
/ \
/ \
/ \
/ \
+---------+----------+ +---------+----------+
comma | GT_COMMA | | "+" (i.e. GT_ADD) | addr
+---------+----------+ +---------+----------+
| |
/ \ / \
/ \ / \
/ \ / \
+-----+-----+ +-----+-----+ +---------+ +-----------+
asg | GT_ASG | ind | GT_IND | | tmpLcl | | fldOffset |
+-----+-----+ +-----+-----+ +---------+ +-----------+
| |
/ \ |
/ \ |
/ \ |
+-----+-----+ +-----+-----+ +-----------+
| tmpLcl | | objRef | | tmpLcl |
+-----------+ +-----------+ +-----------+
*/
var_types objRefType = objRef->TypeGet();
GenTree* comma = nullptr;
// NULL mac means we encounter the GT_FIELD first. This denotes a dereference of the field,
// and thus is equivalent to a MACK_Ind with zero offset.
if (mac == nullptr)
{
mac = &defMAC;
}
// This flag is set to enable the "conservative" style of explicit null-check insertion.
// This means that we insert an explicit null check whenever we create byref by adding a
// constant offset to a ref, in a MACK_Addr context (meaning that the byref is not immediately
// dereferenced). The alternative is "aggressive", which would not insert such checks (for
// small offsets); in this plan, we would transfer some null-checking responsibility to
// callees of methods taking byref parameters. They would have to add explicit null checks
// when creating derived byrefs from argument byrefs by adding constants to argument byrefs, in
// contexts where the resulting derived byref is not immediately dereferenced (or if the offset is too
// large). To make the "aggressive" scheme work, however, we'd also have to add explicit derived-from-null
// checks for byref parameters to "external" methods implemented in C++, and in P/Invoke stubs.
// This is left here to point out how to implement it.
CLANG_FORMAT_COMMENT_ANCHOR;
#define CONSERVATIVE_NULL_CHECK_BYREF_CREATION 1
bool addExplicitNullCheck = false;
// Implicit byref locals and string literals are never null.
if (fgAddrCouldBeNull(objRef))
{
// If the objRef is a GT_ADDR node, it, itself, never requires null checking. The expression
// whose address is being taken is either a local or static variable, whose address is necessarily
// non-null, or else it is a field dereference, which will do its own bounds checking if necessary.
if (objRef->gtOper != GT_ADDR && (mac->m_kind == MACK_Addr || mac->m_kind == MACK_Ind))
{
if (!mac->m_allConstantOffsets || fgIsBigOffset(mac->m_totalOffset + fldOffset))
{
addExplicitNullCheck = true;
}
else
{
// In R2R mode the field offset for some fields may change when the code
// is loaded. So we can't rely on a zero offset here to suppress the null check.
//
// See GitHub issue #16454.
bool fieldHasChangeableOffset = false;
#ifdef FEATURE_READYTORUN
fieldHasChangeableOffset = (tree->AsField()->gtFieldLookup.addr != nullptr);
#endif
#if CONSERVATIVE_NULL_CHECK_BYREF_CREATION
addExplicitNullCheck = (mac->m_kind == MACK_Addr) &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset);
#else
addExplicitNullCheck = (objRef->gtType == TYP_BYREF && mac->m_kind == MACK_Addr &&
((mac->m_totalOffset + fldOffset > 0) || fieldHasChangeableOffset));
#endif
}
}
}
if (addExplicitNullCheck)
{
#ifdef DEBUG
if (verbose)
{
printf("Before explicit null check morphing:\n");
gtDispTree(tree);
}
#endif
//
// Create the "comma" subtree
//
GenTree* asg = nullptr;
GenTree* nullchk;
unsigned lclNum;
if (objRef->gtOper != GT_LCL_VAR)
{
lclNum = fgGetBigOffsetMorphingTemp(genActualType(objRef->TypeGet()));
// Create the "asg" node
asg = gtNewTempAssign(lclNum, objRef);
}
else
{
lclNum = objRef->AsLclVarCommon()->GetLclNum();
}
GenTree* lclVar = gtNewLclvNode(lclNum, objRefType);
nullchk = gtNewNullCheck(lclVar, compCurBB);
nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections
if (asg)
{
// Create the "comma" node.
comma = gtNewOperNode(GT_COMMA,
TYP_VOID, // We don't want to return anything from this "comma" node.
// Set the type to TYP_VOID, so we can select "cmp" instruction
// instead of "mov" instruction later on.
asg, nullchk);
}
else
{
comma = nullchk;
}
addr = gtNewLclvNode(lclNum, objRefType); // Use "tmpLcl" to create "addr" node.
}
else
{
addr = objRef;
}
#ifdef FEATURE_READYTORUN
if (tree->AsField()->gtFieldLookup.addr != nullptr)
{
GenTree* offsetNode = nullptr;
if (tree->AsField()->gtFieldLookup.accessType == IAT_PVALUE)
{
offsetNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)tree->AsField()->gtFieldLookup.addr,
GTF_ICON_CONST_PTR, true);
#ifdef DEBUG
offsetNode->gtGetOp1()->AsIntCon()->gtTargetHandle = (size_t)symHnd;
#endif
}
else
{
noway_assert(!"unexpected accessType for R2R field access");
}
var_types addType = (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF;
addr = gtNewOperNode(GT_ADD, addType, addr, offsetNode);
}
#endif
if (fldOffset != 0)
{
// Generate the "addr" node.
// Add the member offset to the object's address.
addr = gtNewOperNode(GT_ADD, (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF, addr,
gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq));
}
// Now let's set the "tree" as a GT_IND tree.
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
tree->SetIndirExceptionFlags(this);
if (addExplicitNullCheck)
{
//
// Create "comma2" node and link it to "tree".
//
GenTree* comma2;
comma2 = gtNewOperNode(GT_COMMA,
addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node.
comma, addr);
tree->AsOp()->gtOp1 = comma2;
}
#ifdef DEBUG
if (verbose)
{
if (addExplicitNullCheck)
{
printf("After adding explicit null check:\n");
gtDispTree(tree);
}
}
#endif
}
else /* This is a static data member */
{
if (tree->gtFlags & GTF_IND_TLS_REF)
{
// Thread Local Storage static field reference
//
// Field ref is a TLS 'Thread-Local-Storage' reference
//
// Build this tree: IND(*) #
// |
// ADD(I_IMPL)
// / \.
// / CNS(fldOffset)
// /
// /
// /
// IND(I_IMPL) == [Base of this DLL's TLS]
// |
// ADD(I_IMPL)
// / \.
// / CNS(IdValue*4) or MUL
// / / \.
// IND(I_IMPL) / CNS(4)
// | /
// CNS(TLS_HDL,0x2C) IND
// |
// CNS(pIdAddr)
//
// # Denotes the original node
//
void** pIdAddr = nullptr;
unsigned IdValue = info.compCompHnd->getFieldThreadLocalStoreID(symHnd, (void**)&pIdAddr);
//
// If we can access the TLS DLL index ID value directly
// then pIdAddr will be NULL and
// IdValue will be the actual TLS DLL index ID
//
GenTree* dllRef = nullptr;
if (pIdAddr == nullptr)
{
if (IdValue != 0)
{
dllRef = gtNewIconNode(IdValue * 4, TYP_I_IMPL);
}
}
else
{
dllRef = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)pIdAddr, GTF_ICON_CONST_PTR, true);
// Next we multiply by 4
dllRef = gtNewOperNode(GT_MUL, TYP_I_IMPL, dllRef, gtNewIconNode(4, TYP_I_IMPL));
}
#define WIN32_TLS_SLOTS (0x2C) // Offset from fs:[0] where the pointer to the slots resides
// Mark this ICON as a TLS_HDL, codegen will use FS:[cns]
GenTree* tlsRef = gtNewIconHandleNode(WIN32_TLS_SLOTS, GTF_ICON_TLS_HDL);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS
if ((tree->gtFlags & GTF_FLD_INITCLASS) != 0)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
tlsRef->gtFlags |= GTF_ICON_INITCLASS;
}
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (dllRef != nullptr)
{
/* Add the dllRef */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, dllRef);
}
/* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
if (fldOffset != 0)
{
GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);
/* Add the TLS static field offset to the address */
tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode);
}
// Final indirect to get to actual value of TLS static field
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = tlsRef;
noway_assert(tree->gtFlags & GTF_IND_TLS_REF);
}
else
{
// Normal static field reference
//
// If we can access the static's address directly
// then pFldAddr will be NULL and
// fldAddr will be the actual address of the static field
//
void** pFldAddr = nullptr;
void* fldAddr = info.compCompHnd->getFieldAddress(symHnd, (void**)&pFldAddr);
// We should always be able to access this static field address directly
//
assert(pFldAddr == nullptr);
// For boxed statics, this direct address will be for the box. We have already added
// the indirection for the field itself and attached the sequence, in importation.
bool isBoxedStatic = gtIsStaticFieldPtrToBoxedStruct(tree->TypeGet(), symHnd);
if (isBoxedStatic)
{
fieldSeq = FieldSeqStore::NotAField();
}
// TODO-CQ: enable this optimization for 32 bit targets.
bool isStaticReadOnlyInited = false;
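// On 64-bit targets, ask the VM for the exact class of the static ref field; if the answer
// is definite (not speculative), the field is already initialized and can be treated as invariant.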
#ifdef TARGET_64BIT
if (tree->TypeIs(TYP_REF) && !isBoxedStatic)
{
bool pIsSpeculative = true;
if (info.compCompHnd->getStaticFieldCurrentClass(symHnd, &pIsSpeculative) != NO_CLASS_HANDLE)
{
isStaticReadOnlyInited = !pIsSpeculative;
}
}
#endif // TARGET_64BIT
// TODO: choices made below have mostly historical reasons and
// should be unified to always use the IND(<address>) form.
CLANG_FORMAT_COMMENT_ANCHOR;
#if defined(TARGET_64BIT) || defined(TARGET_X86)
bool preferIndir = true;
#else // !TARGET_64BIT
bool preferIndir = isBoxedStatic;
#endif // !TARGET_64BIT
if (preferIndir)
{
GenTreeFlags handleKind = GTF_EMPTY;
if (isBoxedStatic)
{
handleKind = GTF_ICON_STATIC_BOX_PTR;
}
else if (isStaticReadOnlyInited)
{
handleKind = GTF_ICON_CONST_PTR;
}
else
{
handleKind = GTF_ICON_STATIC_HDL;
}
GenTree* addr = gtNewIconHandleNode((size_t)fldAddr, handleKind, fieldSeq);
// Translate GTF_FLD_INITCLASS to GTF_ICON_INITCLASS, if we need to.
if (((tree->gtFlags & GTF_FLD_INITCLASS) != 0) && !isStaticReadOnlyInited)
{
tree->gtFlags &= ~GTF_FLD_INITCLASS;
addr->gtFlags |= GTF_ICON_INITCLASS;
}
tree->SetOper(GT_IND);
tree->AsOp()->gtOp1 = addr;
if (isBoxedStatic)
{
// The box for the static cannot be null, and is logically invariant, since it
// represents (a base for) the static's address.
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
else if (isStaticReadOnlyInited)
{
JITDUMP("Marking initialized static read-only field '%s' as invariant.\n", eeGetFieldName(symHnd));
// Static readonly field is not null at this point (see getStaticFieldCurrentClass impl).
tree->gtFlags |= (GTF_IND_INVARIANT | GTF_IND_NONFAULTING | GTF_IND_NONNULL);
}
return fgMorphSmpOp(tree);
}
else
{
// Only volatile or classinit could be set, and they map over
noway_assert((tree->gtFlags & ~(GTF_FLD_VOLATILE | GTF_FLD_INITCLASS | GTF_COMMON_MASK)) == 0);
static_assert_no_msg(GTF_FLD_VOLATILE == GTF_CLS_VAR_VOLATILE);
static_assert_no_msg(GTF_FLD_INITCLASS == GTF_CLS_VAR_INITCLASS);
tree->SetOper(GT_CLS_VAR);
tree->AsClsVar()->gtClsVarHnd = symHnd;
tree->AsClsVar()->gtFieldSeq = fieldSeq;
}
return tree;
}
}
noway_assert(tree->gtOper == GT_IND);
if (fldOffset == 0)
{
GenTree* addr = tree->AsOp()->gtOp1;
// 'addr' may be a GT_COMMA. Skip over any comma nodes
addr = addr->gtEffectiveVal();
#ifdef DEBUG
if (verbose)
{
printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n");
gtDispTree(tree);
}
#endif
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node.
fgAddFieldSeqForZeroOffset(addr, fieldSeq);
}
// Pass down the current mac; if non null we are computing an address
GenTree* result = fgMorphSmpOp(tree, mac);
#ifdef DEBUG
if (verbose)
{
printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n");
gtDispTree(result);
}
#endif
return result;
}
//------------------------------------------------------------------------------
// fgMorphCallInline: attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// inlineResult - result tracking and reporting
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
void Compiler::fgMorphCallInline(GenTreeCall* call, InlineResult* inlineResult)
{
bool inliningFailed = false;
// Is this call an inline candidate?
if (call->IsInlineCandidate())
{
InlineContext* createdContext = nullptr;
// Attempt the inline
fgMorphCallInlineHelper(call, inlineResult, &createdContext);
// We should have made up our minds one way or another....
assert(inlineResult->IsDecided());
// If we failed to inline, we have a bit of work to do to cleanup
if (inlineResult->IsFailure())
{
if (createdContext != nullptr)
{
// We created a context before we got to the failure, so mark
// it as failed in the tree.
createdContext->SetFailed(inlineResult);
}
else
{
#ifdef DEBUG
// In debug we always put all inline attempts into the inline tree.
InlineContext* ctx =
m_inlineStrategy->NewContext(call->gtInlineCandidateInfo->inlinersContext, fgMorphStmt, call);
ctx->SetFailed(inlineResult);
#endif
}
inliningFailed = true;
// Clear the Inline Candidate flag so we can ensure later we tried
// inlining all candidates.
//
call->gtFlags &= ~GTF_CALL_INLINE_CANDIDATE;
}
}
else
{
// This wasn't an inline candidate. So it must be a GDV candidate.
assert(call->IsGuardedDevirtualizationCandidate());
// We already know we can't inline this call, so don't even bother to try.
inliningFailed = true;
}
// If we failed to inline (or didn't even try), do some cleanup.
if (inliningFailed)
{
if (call->gtReturnType != TYP_VOID)
{
JITDUMP("Inlining [%06u] failed, so bashing " FMT_STMT " to NOP\n", dspTreeID(call), fgMorphStmt->GetID());
// Detach the GT_CALL tree from the original statement by
// hanging a "nothing" node off it. Later the "nothing" node will be removed
// and the original GT_CALL tree will be picked up by the GT_RET_EXPR node.
noway_assert(fgMorphStmt->GetRootNode() == call);
fgMorphStmt->SetRootNode(gtNewNothingNode());
}
}
}
//------------------------------------------------------------------------------
// fgMorphCallInlineHelper: Helper to attempt to inline a call
//
// Arguments:
// call - call expression to inline, inline candidate
// result - result to set to success or failure
// createdContext - The context that was created if the inline attempt got to the inliner.
//
// Notes:
// Attempts to inline the call.
//
// If successful, callee's IR is inserted in place of the call, and
// is marked with an InlineContext.
//
// If unsuccessful, the transformations done in anticipation of a
// possible inline are undone, and the candidate flag on the call
// is cleared.
//
// If a context was created because we got to the importer then it is output by this function.
// If the inline succeeded, this context will already be marked as successful. If it failed and
// a context is returned, then it will not have been marked as success or failed.
void Compiler::fgMorphCallInlineHelper(GenTreeCall* call, InlineResult* result, InlineContext** createdContext)
{
// Don't expect any surprises here.
assert(result->IsCandidate());
if (lvaCount >= MAX_LV_NUM_COUNT_FOR_INLINING)
{
// For now, attributing this to call site, though it's really
// more of a budget issue (lvaCount currently includes all
// caller and prospective callee locals). We still might be
// able to inline other callees into this caller, or inline
// this callee in other callers.
result->NoteFatal(InlineObservation::CALLSITE_TOO_MANY_LOCALS);
return;
}
if (call->IsVirtual())
{
result->NoteFatal(InlineObservation::CALLSITE_IS_VIRTUAL);
return;
}
// Re-check this because guarded devirtualization may allow these through.
if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
{
result->NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
return;
}
// impMarkInlineCandidate() is expected not to mark tail prefixed calls
// and recursive tail calls as inline candidates.
noway_assert(!call->IsTailPrefixedCall());
noway_assert(!call->IsImplicitTailCall() || !gtIsRecursiveCall(call));
//
// Calling inlinee's compiler to inline the method.
//
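// Remember the current local count so that any locals added for the inlinee
// can be discarded if the inline attempt fails.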
unsigned startVars = lvaCount;
#ifdef DEBUG
if (verbose)
{
printf("Expanding INLINE_CANDIDATE in statement ");
printStmtID(fgMorphStmt);
printf(" in " FMT_BB ":\n", compCurBB->bbNum);
gtDispStmt(fgMorphStmt);
if (call->IsImplicitTailCall())
{
printf("Note: candidate is implicit tail call\n");
}
}
#endif
impInlineRoot()->m_inlineStrategy->NoteAttempt(result);
//
// Invoke the compiler to inline the call.
//
fgInvokeInlineeCompiler(call, result, createdContext);
if (result->IsFailure())
{
// Undo some changes made in anticipation of inlining...
// Zero out the used locals
memset(lvaTable + startVars, 0, (lvaCount - startVars) * sizeof(*lvaTable));
for (unsigned i = startVars; i < lvaCount; i++)
{
new (&lvaTable[i], jitstd::placement_t()) LclVarDsc(); // call the constructor.
}
lvaCount = startVars;
#ifdef DEBUG
if (verbose)
{
// printf("Inlining failed. Restore lvaCount to %d.\n", lvaCount);
}
#endif
return;
}
#ifdef DEBUG
if (verbose)
{
// printf("After inlining lvaCount=%d.\n", lvaCount);
}
#endif
}
//------------------------------------------------------------------------
// fgCanFastTailCall: Check to see if this tail call can be optimized as epilog+jmp.
//
// Arguments:
// callee - The callee to check
// failReason - If this method returns false, the reason why. Can be nullptr.
//
// Return Value:
// Returns true or false based on whether the callee can be fastTailCalled
//
// Notes:
// This function is target specific and each target will make the fastTailCall
// decision differently. See the notes below.
//
// This function calls fgInitArgInfo() to initialize the arg info table, which
// is used to analyze the argument. This function can alter the call arguments
// by adding argument IR nodes for non-standard arguments.
//
// Windows Amd64:
// A fast tail call can be made whenever the number of callee arguments
// is less than or equal to the number of caller arguments, or we have four
// or fewer callee arguments. This is because, on Windows AMD64, each
// argument uses exactly one register or one 8-byte stack slot. Thus, we only
// need to count arguments, and not be concerned with the size of each
// incoming or outgoing argument.
//
// Can fast tail call examples (amd64 Windows):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal or less than the caller --
// caller(struct, struct, struct, struct, struct, struct)
// callee(int, int, int, int, int, int)
//
// -- Callee requires stack space that is less than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int)
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Windows):
//
// -- Callee requires stack space that is larger than the caller --
// caller(struct, double, struct, float, struct, struct)
// callee(int, int, int, int, int, double, double, double)
//
// -- Callee has a byref struct argument --
// caller(int, int, int)
// callee(struct(size 3 bytes))
//
// Unix Amd64 && Arm64:
// A fastTailCall decision can be made whenever the callee's stack space is
// less than or equal to the caller's stack space. There are many permutations
// of when the caller and callee have different stack sizes if there are
// structs being passed to either the caller or callee.
//
// Exceptions:
// If the callee has a 9 to 16 byte struct argument and the callee has
// stack arguments, the decision will be to not fast tail call. This is
// because before fgMorphArgs is done, it is unknown whether the struct
// will be placed on the stack or enregistered. Therefore, the conservative
// decision not to fast tail call is taken. This limitation should be
// removed if/when fgMorphArgs no longer depends on fgCanFastTailCall.
//
// Can fast tail call examples (amd64 Unix):
//
// -- Callee will have all register arguments --
// caller(int, int, int, int)
// callee(int, int, float, int)
//
// -- Callee requires stack space that is equal to the caller --
// caller({ long, long }, { int, int }, { int }, { int }, { int }, { int }) -- 6 int register arguments, 16 byte
// stack
// space
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee requires stack space that is less than the caller --
// caller({ long, long }, int, { long, long }, int, { long, long }, { long, long }) 6 int register arguments, 32 byte
// stack
// space
// callee(int, int, int, int, int, int, { long, long } ) // 6 int register arguments, 16 byte stack space
//
// -- Callee will have all register arguments --
// caller(int)
// callee(int, int, int, int)
//
// Cannot fast tail call examples (amd64 Unix):
//
// -- Callee requires stack space that is larger than the caller --
// caller(float, float, float, float, float, float, float, float) -- 8 float register arguments
// callee(int, int, int, int, int, int, int, int) -- 6 int register arguments, 16 byte stack space
//
// -- Callee has structs which cannot be enregistered (Implementation Limitation) --
// caller(float, float, float, float, float, float, float, float, { double, double, double }) -- 8 float register
// arguments, 24 byte stack space
// callee({ double, double, double }) -- 24 bytes stack space
//
// -- Callee requires stack space and has a struct argument >8 bytes and <16 bytes (Implementation Limitation) --
// caller(int, int, int, int, int, int, { double, double, double }) -- 6 int register arguments, 24 byte stack space
// callee(int, int, int, int, int, int, { int, int }) -- 6 int registers, 16 byte stack space
//
// -- Caller requires stack space and nCalleeArgs > nCallerArgs (Bug) --
// caller({ double, double, double, double, double, double }) // 48 byte stack
// callee(int, int) -- 2 int registers
bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
{
#if FEATURE_FASTTAILCALL
// To reach here means that the return types of the caller and callee are tail call compatible.
// In the case of structs that can be returned in a register, compRetNativeType is set to the actual return type.
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (callee->IsTailPrefixedCall())
{
var_types retType = info.compRetType;
assert(impTailCallRetTypeCompatible(false, retType, info.compMethodInfo->args.retTypeClass, info.compCallConv,
(var_types)callee->gtReturnType, callee->gtRetClsHnd,
callee->GetUnmanagedCallConv()));
}
#endif
assert(!callee->AreArgsComplete());
fgInitArgInfo(callee);
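// A fast tail call reuses the caller's incoming arg area, so compare the stack space the
// callee's arguments need against the space the caller's incoming arguments occupy.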
fgArgInfo* argInfo = callee->fgArgInfo;
unsigned calleeArgStackSize = 0;
unsigned callerArgStackSize = info.compArgStackSize;
auto reportFastTailCallDecision = [&](const char* thisFailReason) {
if (failReason != nullptr)
{
*failReason = thisFailReason;
}
#ifdef DEBUG
if ((JitConfig.JitReportFastTailCallDecisions()) == 1)
{
if (callee->gtCallType != CT_INDIRECT)
{
const char* methodName;
methodName = eeGetMethodFullName(callee->gtCallMethHnd);
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: %s -- Decision: ",
info.compFullName, methodName);
}
else
{
printf("[Fast tailcall decision]: Caller: %s\n[Fast tailcall decision]: Callee: IndirectCall -- "
"Decision: ",
info.compFullName);
}
if (thisFailReason == nullptr)
{
printf("Will fast tailcall");
}
else
{
printf("Will not fast tailcall (%s)", thisFailReason);
}
printf(" (CallerArgStackSize: %d, CalleeArgStackSize: %d)\n\n", callerArgStackSize, calleeArgStackSize);
}
else
{
if (thisFailReason == nullptr)
{
JITDUMP("[Fast tailcall decision]: Will fast tailcall\n");
}
else
{
JITDUMP("[Fast tailcall decision]: Will not fast tailcall (%s)\n", thisFailReason);
}
}
#endif // DEBUG
};
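// Compute the stack space required by the callee's arguments, honoring each argument's alignment.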
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
calleeArgStackSize = roundUp(calleeArgStackSize, arg->GetByteAlignment());
calleeArgStackSize += arg->GetStackByteSize();
#ifdef TARGET_ARM
if (arg->IsSplit())
{
reportFastTailCallDecision("Splitted argument in callee is not supported on ARM32");
return false;
}
#endif // TARGET_ARM
}
calleeArgStackSize = GetOutgoingArgByteSize(calleeArgStackSize);
#ifdef TARGET_ARM
if (compHasSplitParam)
{
reportFastTailCallDecision("Splitted argument in caller is not supported on ARM32");
return false;
}
if (compIsProfilerHookNeeded())
{
reportFastTailCallDecision("Profiler is not supported on ARM32");
return false;
}
// On ARM32 we have only one non-parameter volatile register and we need it
// for the GS security cookie check. We could technically still tailcall
// when the callee does not use all argument registers, but we keep the
// code simple here.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("Not enough registers available due to the GS security cookie check");
return false;
}
#endif
if (!opts.compFastTailCalls)
{
reportFastTailCallDecision("Configuration doesn't allow fast tail calls");
return false;
}
if (callee->IsStressTailCall())
{
reportFastTailCallDecision("Fast tail calls are not performed under tail call stress");
return false;
}
#ifdef TARGET_ARM
if (callee->IsR2RRelativeIndir() || callee->HasNonStandardAddedArgs(this))
{
reportFastTailCallDecision(
"Method with non-standard args passed in callee saved register cannot be tail called");
return false;
}
#endif
// Note on vararg methods:
// If the caller is a vararg method, we don't know the number of arguments passed by the caller's caller.
// But we can be sure that the incoming arg area of the vararg caller would be sufficient to hold its
// fixed args. Therefore, we can allow a vararg method to fast tail call other methods as long as the
// outgoing area required for the callee is bounded by the caller's fixed argument space.
//
// Note that the callee being a vararg method is not a problem since we can account for the params being passed.
//
// We will currently decide to not fast tail call on Windows armarch if the caller or callee is a vararg
// method. This is due to the ABI differences for native vararg methods for these platforms. There is
// work required to shuffle arguments to the correct locations.
CLANG_FORMAT_COMMENT_ANCHOR;
if (TargetOS::IsWindows && TargetArchitecture::IsArmArch && (info.compIsVarArgs || callee->IsVarargs()))
{
reportFastTailCallDecision("Fast tail calls with varargs not supported on Windows ARM/ARM64");
return false;
}
if (compLocallocUsed)
{
reportFastTailCallDecision("Localloc used");
return false;
}
#ifdef TARGET_AMD64
// Needed for Jit64 compat.
// In the future, enabling fast tail calls from methods that need a GS cookie
// check would require codegen-side work to emit the GS cookie check before a
// tail call.
if (getNeedsGSSecurityCookie())
{
reportFastTailCallDecision("GS Security cookie check required");
return false;
}
#endif
// If the NextCallReturnAddress intrinsic is used we should do normal calls.
if (info.compHasNextCallRetAddr)
{
reportFastTailCallDecision("Uses NextCallReturnAddress intrinsic");
return false;
}
if (callee->HasRetBufArg()) // RetBuf
{
// If the callee has a RetBuf param, the caller must have one too.
// Otherwise go the slow route.
if (info.compRetBuffArg == BAD_VAR_NUM)
{
reportFastTailCallDecision("Callee has RetBuf but caller does not.");
return false;
}
}
// For a fast tail call the caller will use its incoming arg stack space to place
// arguments, so if the callee requires more arg stack space than is available here
// the fast tail call cannot be performed. This is common to all platforms.
// Note that the GC'ness of on-stack args need not match since the arg setup area is marked
// as non-interruptible for fast tail calls.
if (calleeArgStackSize > callerArgStackSize)
{
reportFastTailCallDecision("Not enough incoming arg space");
return false;
}
// For Windows some struct parameters are copied on the local frame
// and then passed by reference. We cannot fast tail call in these situations
// as we need to keep our frame around.
if (fgCallHasMustCopyByrefParameter(callee))
{
reportFastTailCallDecision("Callee has a byref parameter");
return false;
}
reportFastTailCallDecision(nullptr);
return true;
#else // FEATURE_FASTTAILCALL
if (failReason)
*failReason = "Fast tailcalls are not supported on this platform";
return false;
#endif
}
//------------------------------------------------------------------------
// fgCallHasMustCopyByrefParameter: Check to see if this call has a byref parameter that
// requires a struct copy in the caller.
//
// Arguments:
// callee - The callee to check
//
// Return Value:
// Returns true or false based on whether this call has a byref parameter that
// requires a struct copy in the caller.
#if FEATURE_FASTTAILCALL
bool Compiler::fgCallHasMustCopyByrefParameter(GenTreeCall* callee)
{
fgArgInfo* argInfo = callee->fgArgInfo;
bool hasMustCopyByrefParameter = false;
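// Walk the args: a struct passed by reference normally forces a caller-side copy, but the
// analysis below may prove that the caller's own implicit byref can be forwarded without a copy.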
for (unsigned index = 0; index < argInfo->ArgCount(); ++index)
{
fgArgTabEntry* arg = argInfo->GetArgEntry(index, false);
if (arg->isStruct)
{
if (arg->passedByRef)
{
// Generally a byref arg will block tail calling, as we have to
// make a local copy of the struct for the callee.
hasMustCopyByrefParameter = true;
// If we're optimizing, we may be able to pass our caller's byref to our callee,
// and so still be able to avoid a struct copy.
if (opts.OptimizationEnabled())
{
// First, see if this arg is an implicit byref param.
GenTreeLclVar* const lcl = arg->GetNode()->IsImplicitByrefParameterValue(this);
if (lcl != nullptr)
{
// Yes, the arg is an implicit byref param.
const unsigned lclNum = lcl->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(lcl);
// The param must not be promoted; if we've promoted, then the arg will be
// a local struct assembled from the promoted fields.
if (varDsc->lvPromoted)
{
JITDUMP("Arg [%06u] is promoted implicit byref V%02u, so no tail call\n",
dspTreeID(arg->GetNode()), lclNum);
}
else
{
JITDUMP("Arg [%06u] is unpromoted implicit byref V%02u, seeing if we can still tail call\n",
dspTreeID(arg->GetNode()), lclNum);
// We have to worry about introducing aliases if we bypass copying
// the struct at the call. We'll do some limited analysis to see if we
// can rule this out.
const unsigned argLimit = 6;
// If this is the only appearance of the byref in the method, then
// aliasing is not possible.
//
// If no other call arg refers to this byref, and no other arg is
// a pointer which could refer to this byref, we can optimize.
//
// We only check this for calls with small numbers of arguments,
// as the analysis cost will be quadratic.
//
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
if (totalAppearances == 1)
{
JITDUMP("... yes, arg is the only appearance of V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
else if (totalAppearances > callAppearances)
{
// lvRefCntWtd tracks the number of appearances of the arg at call sites.
// If this number doesn't match the regular ref count, there is
// a non-call appearance, and we must be conservative.
//
JITDUMP("... no, arg has %u non-call appearance(s)\n",
totalAppearances - callAppearances);
}
else if (argInfo->ArgCount() <= argLimit)
{
JITDUMP("... all %u appearance(s) are as implicit byref args to calls.\n"
"... Running alias analysis on this call's args\n",
totalAppearances);
GenTree* interferingArg = nullptr;
for (unsigned index2 = 0; index2 < argInfo->ArgCount(); ++index2)
{
if (index2 == index)
{
continue;
}
fgArgTabEntry* const arg2 = argInfo->GetArgEntry(index2, false);
JITDUMP("... checking other arg [%06u]...\n", dspTreeID(arg2->GetNode()));
DISPTREE(arg2->GetNode());
// Do we pass 'lcl' more than once to the callee?
if (arg2->isStruct && arg2->passedByRef)
{
GenTreeLclVarCommon* const lcl2 =
arg2->GetNode()->IsImplicitByrefParameterValue(this);
if ((lcl2 != nullptr) && (lclNum == lcl2->GetLclNum()))
{
// not copying would introduce aliased implicit byref structs
// in the callee ... we can't optimize.
interferingArg = arg2->GetNode();
break;
}
else
{
JITDUMP("... arg refers to different implicit byref V%02u\n",
lcl2->GetLclNum());
continue;
}
}
// Do we pass a byref pointer which might point within 'lcl'?
//
// We can assume the 'lcl' is unaliased on entry to the
// method, so the only way we can have an aliasing byref pointer at
// the call is if 'lcl' is address taken/exposed in the method.
//
// Note even though 'lcl' is not promoted, we are in the middle
// of the promote->rewrite->undo->(morph)->demote cycle, and so
// might see references to promoted fields of 'lcl' that haven't yet
// been demoted (see fgMarkDemotedImplicitByRefArgs).
//
// So, we also need to scan all 'lcl's fields, if any, to see if they
// are exposed.
//
// When looking for aliases from other args, we check for both TYP_BYREF
// and TYP_I_IMPL typed args here. Conceptually anything that points into
// an implicit byref parameter should be TYP_BYREF, as these parameters could
// refer to boxed heap locations (say if the method is invoked by reflection)
// but there are some stack only structs (like typed references) where
// the importer/runtime code uses TYP_I_IMPL, and fgInitArgInfo will
// transiently retype all simple address-of implicit parameter args as
// TYP_I_IMPL.
//
if ((arg2->argType == TYP_BYREF) || (arg2->argType == TYP_I_IMPL))
{
JITDUMP("...arg is a byref, must run an alias check\n");
bool checkExposure = true;
bool hasExposure = false;
// See if there is any way arg could refer to a parameter struct.
GenTree* arg2Node = arg2->GetNode();
if (arg2Node->OperIs(GT_LCL_VAR))
{
GenTreeLclVarCommon* arg2LclNode = arg2Node->AsLclVarCommon();
assert(arg2LclNode->GetLclNum() != lclNum);
LclVarDsc* arg2Dsc = lvaGetDesc(arg2LclNode);
// Other params can't alias implicit byref params
if (arg2Dsc->lvIsParam)
{
checkExposure = false;
}
}
// Because we're checking TYP_I_IMPL above, at least
// screen out obvious things that can't cause aliases.
else if (arg2Node->IsIntegralConst())
{
checkExposure = false;
}
if (checkExposure)
{
JITDUMP(
"... not sure where byref arg points, checking if V%02u is exposed\n",
lclNum);
// arg2 might alias arg, see if we've exposed
// arg somewhere in the method.
if (varDsc->lvHasLdAddrOp || varDsc->IsAddressExposed())
{
// Struct as a whole is exposed, can't optimize
JITDUMP("... V%02u is exposed\n", lclNum);
hasExposure = true;
}
else if (varDsc->lvFieldLclStart != 0)
{
// This is the promoted/undone struct case.
//
// The field start is actually the local number of the promoted local,
// use it to enumerate the fields.
const unsigned promotedLcl = varDsc->lvFieldLclStart;
LclVarDsc* const promotedVarDsc = lvaGetDesc(promotedLcl);
JITDUMP("...promoted-unpromoted case -- also checking exposure of "
"fields of V%02u\n",
promotedLcl);
for (unsigned fieldIndex = 0; fieldIndex < promotedVarDsc->lvFieldCnt;
fieldIndex++)
{
LclVarDsc* fieldDsc =
lvaGetDesc(promotedVarDsc->lvFieldLclStart + fieldIndex);
if (fieldDsc->lvHasLdAddrOp || fieldDsc->IsAddressExposed())
{
// Promoted and not yet demoted field is exposed, can't optimize
JITDUMP("... field V%02u is exposed\n",
promotedVarDsc->lvFieldLclStart + fieldIndex);
hasExposure = true;
break;
}
}
}
}
if (hasExposure)
{
interferingArg = arg2->GetNode();
break;
}
}
else
{
JITDUMP("...arg is not a byref or implicit byref (%s)\n",
varTypeName(arg2->GetNode()->TypeGet()));
}
}
if (interferingArg != nullptr)
{
JITDUMP("... no, arg [%06u] may alias with V%02u\n", dspTreeID(interferingArg),
lclNum);
}
else
{
JITDUMP("... yes, no other arg in call can alias V%02u\n", lclNum);
hasMustCopyByrefParameter = false;
}
}
else
{
JITDUMP(" ... no, call has %u > %u args, alias analysis deemed too costly\n",
argInfo->ArgCount(), argLimit);
}
}
}
}
if (hasMustCopyByrefParameter)
{
// This arg requires a struct copy. No reason to keep scanning the remaining args.
break;
}
}
}
}
return hasMustCopyByrefParameter;
}
#endif
//------------------------------------------------------------------------
// fgMorphPotentialTailCall: Attempt to morph a call that the importer has
// identified as a potential tailcall to an actual tailcall and return the
// placeholder node to use in this case.
//
// Arguments:
// call - The call to morph.
//
// Return Value:
// Returns a node to use if the call was morphed into a tailcall. If this
// function returns a node the call is done being morphed and the new node
// should be used. Otherwise the call will have been demoted to a regular call
// and should go through normal morph.
//
// Notes:
// This is called only for calls that the importer has already identified as
// potential tailcalls. It will do profitability and legality checks and
// classify which kind of tailcall we are able to (or should) do, along with
// modifying the trees to perform that kind of tailcall.
//
GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
// It should either be an explicit (i.e. tail prefixed) or an implicit tail call
assert(call->IsTailPrefixedCall() ^ call->IsImplicitTailCall());
// It cannot be an inline candidate
assert(!call->IsInlineCandidate());
auto failTailCall = [&](const char* reason, unsigned lclNum = BAD_VAR_NUM) {
#ifdef DEBUG
if (verbose)
{
printf("\nRejecting tail call in morph for call ");
printTreeID(call);
printf(": %s", reason);
if (lclNum != BAD_VAR_NUM)
{
printf(" V%02u", lclNum);
}
printf("\n");
}
#endif
// for non user funcs, we have no handles to report
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), TAILCALL_FAIL, reason);
// We have checked the candidate so demote.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
};
if (call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC)
{
failTailCall("Might turn into an intrinsic");
return nullptr;
}
#ifdef TARGET_ARM
if (call->gtCallMoreFlags & GTF_CALL_M_WRAPPER_DELEGATE_INV)
{
failTailCall("Non-standard calling convention");
return nullptr;
}
#endif
if (call->IsNoReturn() && !call->IsTailPrefixedCall())
{
// Such tail calls always throw an exception and we won't be able to see current
// Caller() in the stacktrace.
failTailCall("Never returns");
return nullptr;
}
#ifdef DEBUG
if (opts.compGcChecks && (info.compRetType == TYP_REF))
{
failTailCall("COMPlus_JitGCChecks or stress might have interposed a call to CORINFO_HELP_CHECK_OBJ, "
"invalidating tailcall opportunity");
return nullptr;
}
#endif
// We have to ensure that we pass the incoming retValBuf as the
// outgoing one. Using a temp will not do as this function will
// not regain control to do the copy. This can happen when inlining
// a tailcall which also has a potential tailcall in it: the IL looks
// like we can do a tailcall, but the trees generated use a temp for the inlinee's
// result. TODO-CQ: Fix this.
if (info.compRetBuffArg != BAD_VAR_NUM)
{
noway_assert(call->TypeGet() == TYP_VOID);
GenTree* retValBuf = call->gtCallArgs->GetNode();
if (retValBuf->gtOper != GT_LCL_VAR || retValBuf->AsLclVarCommon()->GetLclNum() != info.compRetBuffArg)
{
failTailCall("Need to copy return buffer");
return nullptr;
}
}
// We are still not sure whether it can be a tail call. Because, when converting
// a call to an implicit tail call, we must check that there are no locals with
// their address taken. If this is the case, we have to assume that the address
// has been leaked and the current stack frame must live until after the final
// call.
// Verify that none of the vars has the lvHasLdAddrOp or IsAddressExposed() bit set. Note
// that lvHasLdAddrOp is much more conservative. We cannot just base it on
// IsAddressExposed() alone since it is not guaranteed to be set on all VarDscs
// during morph stage. The reason for also checking IsAddressExposed() is that in case
// of vararg methods user args are marked as addr exposed but not lvHasLdAddrOp.
// The combination of lvHasLdAddrOp and IsAddressExposed() though conservative allows us
// never to be incorrect.
//
// TODO-Throughput: have a compiler level flag to indicate whether method has vars whose
// address is taken. Such a flag could be set whenever lvHasLdAddrOp or IsAddressExposed()
// is set. This avoids the need for iterating through all lcl vars of the current
// method. Right now throughout the code base we are not consistently using 'set'
// method to set lvHasLdAddrOp and IsAddressExposed() flags.
bool isImplicitOrStressTailCall = call->IsImplicitTailCall() || call->IsStressTailCall();
if (isImplicitOrStressTailCall && compLocallocUsed)
{
failTailCall("Localloc used");
return nullptr;
}
bool hasStructParam = false;
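// Scan the locals: implicit/stress tail calls are rejected if a local's address may escape or a
// local is pinned; struct parameters are recorded since they prevent turning a recursive tail
// call into a loop.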
for (unsigned varNum = 0; varNum < lvaCount; varNum++)
{
LclVarDsc* varDsc = lvaGetDesc(varNum);
// If the method is marked as an explicit tail call we will skip the
// following three hazard checks.
// We still must check for any struct parameters and set 'hasStructParam'
// so that we won't transform the recursive tail call into a loop.
//
if (isImplicitOrStressTailCall)
{
if (varDsc->lvHasLdAddrOp && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Local address taken", varNum);
return nullptr;
}
if (varDsc->IsAddressExposed())
{
if (lvaIsImplicitByRefLocal(varNum))
{
// The address of the implicit-byref is a non-address use of the pointer parameter.
}
else if (varDsc->lvIsStructField && lvaIsImplicitByRefLocal(varDsc->lvParentLcl))
{
// The address of the implicit-byref's field is likewise a non-address use of the pointer
// parameter.
}
else if (varDsc->lvPromoted && (lvaTable[varDsc->lvFieldLclStart].lvParentLcl != varNum))
{
// This temp was used for struct promotion bookkeeping. It will not be used, and will have
// its ref count and address-taken flag reset in fgMarkDemotedImplicitByRefArgs.
assert(lvaIsImplicitByRefLocal(lvaTable[varDsc->lvFieldLclStart].lvParentLcl));
assert(fgGlobalMorph);
}
else
{
failTailCall("Local address taken", varNum);
return nullptr;
}
}
if (varDsc->lvPromoted && varDsc->lvIsParam && !lvaIsImplicitByRefLocal(varNum))
{
failTailCall("Has Struct Promoted Param", varNum);
return nullptr;
}
if (varDsc->lvPinned)
{
// A tail call removes the method from the stack, which means the pinning
// goes away for the callee. We can't allow that.
failTailCall("Has Pinned Vars", varNum);
return nullptr;
}
}
if (varTypeIsStruct(varDsc->TypeGet()) && varDsc->lvIsParam)
{
hasStructParam = true;
// This prevents transforming a recursive tail call into a loop
// but doesn't prevent tail call optimization so we need to
// look at the rest of the parameters.
}
}
if (!fgCheckStmtAfterTailCall())
{
failTailCall("Unexpected statements after the tail call");
return nullptr;
}
const char* failReason = nullptr;
bool canFastTailCall = fgCanFastTailCall(call, &failReason);
CORINFO_TAILCALL_HELPERS tailCallHelpers;
bool tailCallViaJitHelper = false;
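// An explicit tail call that cannot be dispatched as a fast tail call goes either through the
// x86 JIT helper or through the runtime-provided tailcall helpers.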
if (!canFastTailCall)
{
if (call->IsImplicitTailCall())
{
// Implicit or opportunistic tail calls are always dispatched via the fast tail call
// mechanism and never via a tail call helper, for perf reasons.
failTailCall(failReason);
return nullptr;
}
assert(call->IsTailPrefixedCall());
assert(call->tailCallInfo != nullptr);
// We do not currently handle non-standard args except for VSD stubs.
if (!call->IsVirtualStub() && call->HasNonStandardAddedArgs(this))
{
failTailCall(
"Method with non-standard args passed in callee trash register cannot be tail called via helper");
return nullptr;
}
// On x86 we have a faster mechanism than the general one which we use
// in almost all cases. See fgCanTailCallViaJitHelper for more information.
if (fgCanTailCallViaJitHelper())
{
tailCallViaJitHelper = true;
}
else
{
// Make sure we can get the helpers. We do this last as the runtime
// will likely be required to generate these.
CORINFO_RESOLVED_TOKEN* token = nullptr;
CORINFO_SIG_INFO* sig = call->tailCallInfo->GetSig();
unsigned flags = 0;
if (!call->tailCallInfo->IsCalli())
{
token = call->tailCallInfo->GetToken();
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_TAILCALL_IS_CALLVIRT;
}
}
if (call->gtCallThisArg != nullptr)
{
var_types thisArgType = call->gtCallThisArg->GetNode()->TypeGet();
if (thisArgType != TYP_REF)
{
flags |= CORINFO_TAILCALL_THIS_ARG_IS_BYREF;
}
}
if (!info.compCompHnd->getTailCallHelpers(token, sig, (CORINFO_GET_TAILCALL_HELPERS_FLAGS)flags,
&tailCallHelpers))
{
failTailCall("Tail call help not available");
return nullptr;
}
}
}
// Check if we can make the tailcall a loop.
bool fastTailCallToLoop = false;
#if FEATURE_TAILCALL_OPT
// TODO-CQ: enable the transformation when the method has a struct parameter that can be passed in a register
// or return type is a struct that can be passed in a register.
//
// TODO-CQ: if the method being compiled requires generic context reported in gc-info (either through
// hidden generic context param or through keep alive thisptr), then while transforming a recursive
// call to such a method requires that the generic context stored on stack slot be updated. Right now,
// fgMorphRecursiveFastTailCallIntoLoop() is not handling update of generic context while transforming
// a recursive call into a loop. Another option is to modify gtIsRecursiveCall() to check that the
// generic type parameters of both caller and callee generic method are the same.
if (opts.compTailCallLoopOpt && canFastTailCall && gtIsRecursiveCall(call) && !lvaReportParamTypeArg() &&
!lvaKeepAliveAndReportThis() && !call->IsVirtual() && !hasStructParam && !varTypeIsStruct(call->TypeGet()))
{
fastTailCallToLoop = true;
}
#endif
// Ok -- now we are committed to performing a tailcall. Report the decision.
CorInfoTailCall tailCallResult;
if (fastTailCallToLoop)
{
tailCallResult = TAILCALL_RECURSIVE;
}
else if (canFastTailCall)
{
tailCallResult = TAILCALL_OPTIMIZED;
}
else
{
tailCallResult = TAILCALL_HELPER;
}
info.compCompHnd->reportTailCallDecision(nullptr,
(call->gtCallType == CT_USER_FUNC) ? call->gtCallMethHnd : nullptr,
call->IsTailPrefixedCall(), tailCallResult, nullptr);
// Are we currently planning to expand the gtControlExpr as an early virtual call target?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
// It isn't always profitable to expand a virtual call early
//
// We always expand the TAILCALL_HELPER type late.
// And we expand late when we have an optimized tail call
// and the this pointer needs to be evaluated into a temp.
//
if (tailCallResult == TAILCALL_HELPER)
{
// We will always expand this late in lower instead.
// (see LowerTailCallViaJitHelper as it needs some work
// for us to be able to expand this earlier in morph)
//
call->ClearExpandedEarly();
}
else if ((tailCallResult == TAILCALL_OPTIMIZED) &&
((call->gtCallThisArg->GetNode()->gtFlags & GTF_SIDE_EFFECT) != 0))
{
// We generate better code when we expand this late in lower instead.
//
call->ClearExpandedEarly();
}
}
// Now actually morph the call.
compTailCallUsed = true;
// This will prevent inlining this call.
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL;
if (tailCallViaJitHelper)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_VIA_JIT_HELPER;
}
#if FEATURE_TAILCALL_OPT
if (fastTailCallToLoop)
{
call->gtCallMoreFlags |= GTF_CALL_M_TAILCALL_TO_LOOP;
}
#endif
// Mark that this is no longer a pending tailcall. We need to do this before
// we call fgMorphCall again (which happens in the fast tailcall case) to
// avoid recursing back into this method.
call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
#if FEATURE_TAILCALL_OPT
call->gtCallMoreFlags &= ~GTF_CALL_M_IMPLICIT_TAILCALL;
#endif
#ifdef DEBUG
if (verbose)
{
printf("\nGTF_CALL_M_TAILCALL bit set for call ");
printTreeID(call);
printf("\n");
if (fastTailCallToLoop)
{
printf("\nGTF_CALL_M_TAILCALL_TO_LOOP bit set for call ");
printTreeID(call);
printf("\n");
}
}
#endif
// For R2R we might need a different entry point for this call if we are doing a tailcall.
// The reason is that the normal delay load helper uses the return address to find the indirection
// cell in xarch, but now the JIT is expected to leave the indirection cell in REG_R2R_INDIRECT_PARAM:
// We optimize delegate invocations manually in the JIT so skip this for those.
if (call->IsR2RRelativeIndir() && canFastTailCall && !fastTailCallToLoop && !call->IsDelegateInvoke())
{
info.compCompHnd->updateEntryPointForTailCall(&call->gtEntryPoint);
#ifdef TARGET_XARCH
// We have already computed arg info to make the fast tailcall decision, but on X64 we now
// have to pass the indirection cell, so redo arg info.
call->ResetArgInfo();
#endif
}
// If this block has a flow successor, make suitable updates.
//
BasicBlock* const nextBlock = compCurBB->GetUniqueSucc();
if (nextBlock == nullptr)
{
// No unique successor. compCurBB should be a return.
//
assert(compCurBB->bbJumpKind == BBJ_RETURN);
}
else
{
// Flow no longer reaches nextBlock from here.
//
fgRemoveRefPred(nextBlock, compCurBB);
// Adjust profile weights.
//
// Note if this is a tail call to loop, further updates
// are needed once we install the loop edge.
//
if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight())
{
// Since we have linear flow we can update the next block weight.
//
weight_t const blockWeight = compCurBB->bbWeight;
weight_t const nextWeight = nextBlock->bbWeight;
weight_t const newNextWeight = nextWeight - blockWeight;
// If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextWeight >= 0)
{
// Note if we'd already morphed the IR in nextBlock we might
// have done something profile sensitive that we should arguably reconsider.
//
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum,
nextWeight, newNextWeight);
nextBlock->setBBProfileWeight(newNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight);
}
// If nextBlock is not a BBJ_RETURN, it should have a unique successor that
// is a BBJ_RETURN, as we allow a little bit of flow after a tail call.
//
if (nextBlock->bbJumpKind != BBJ_RETURN)
{
BasicBlock* retBlock = nextBlock->GetUniqueSucc();
// Check if we have a sequence of GT_ASG blocks where the same variable is assigned
// to temp locals over and over.
// Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs.
//
// { GT_ASG(t_0, GT_CALL(...)) }
// { GT_ASG(t_1, t0) } (with casts on rhs potentially)
// ...
// { GT_ASG(t_n, t_(n - 1)) }
// { GT_RET t_n }
//
if (retBlock->bbJumpKind != BBJ_RETURN)
{
// Make sure the block has a single statement
assert(nextBlock->firstStmt() == nextBlock->lastStmt());
// And the root node is "ASG(LCL_VAR, LCL_VAR)"
GenTree* asgNode = nextBlock->firstStmt()->GetRootNode();
assert(asgNode->OperIs(GT_ASG));
unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
while (retBlock->bbJumpKind != BBJ_RETURN)
{
#ifdef DEBUG
Statement* nonEmptyStmt = nullptr;
for (Statement* const stmt : retBlock->Statements())
{
// Ignore NOP statements
if (!stmt->GetRootNode()->OperIs(GT_NOP))
{
// Only a single non-NOP statement is allowed
assert(nonEmptyStmt == nullptr);
nonEmptyStmt = stmt;
}
}
if (nonEmptyStmt != nullptr)
{
asgNode = nonEmptyStmt->GetRootNode();
if (!asgNode->OperIs(GT_NOP))
{
assert(asgNode->OperIs(GT_ASG));
GenTree* rhs = asgNode->gtGetOp2();
while (rhs->OperIs(GT_CAST))
{
assert(!rhs->gtOverflow());
rhs = rhs->gtGetOp1();
}
assert(lcl == rhs->AsLclVarCommon()->GetLclNum());
lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
}
}
#endif
retBlock = retBlock->GetUniqueSucc();
}
}
assert(retBlock->bbJumpKind == BBJ_RETURN);
if (retBlock->hasProfileWeight())
{
// Do similar updates here.
//
weight_t const nextNextWeight = retBlock->bbWeight;
weight_t const newNextNextWeight = nextNextWeight - blockWeight;
// If the math would result in a negative weight then there's
// no local repair we can do; just leave things inconsistent.
//
if (newNextNextWeight >= 0)
{
JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, newNextNextWeight);
retBlock->setBBProfileWeight(newNextNextWeight);
}
else
{
JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
" is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight);
}
}
}
}
}
#if !FEATURE_TAILCALL_OPT_SHARED_RETURN
// We enable shared-ret tail call optimization for recursive calls even if
// FEATURE_TAILCALL_OPT_SHARED_RETURN is not defined.
if (gtIsRecursiveCall(call))
#endif
{
// Many tailcalls will have call and ret in the same block, and thus be
// BBJ_RETURN, but if the call falls through to a ret, and we are doing a
// tailcall, change it here.
compCurBB->bbJumpKind = BBJ_RETURN;
}
GenTree* stmtExpr = fgMorphStmt->GetRootNode();
#ifdef DEBUG
// Tail call needs to be in one of the following IR forms
// Either a call stmt or
// GT_RETURN(GT_CALL(..)) or GT_RETURN(GT_CAST(GT_CALL(..)))
// var = GT_CALL(..) or var = (GT_CAST(GT_CALL(..)))
// GT_COMMA(GT_CALL(..), GT_NOP) or GT_COMMA(GT_CAST(GT_CALL(..)), GT_NOP)
// In the above,
// GT_CASTS may be nested.
genTreeOps stmtOper = stmtExpr->gtOper;
if (stmtOper == GT_CALL)
{
assert(stmtExpr == call);
}
else
{
assert(stmtOper == GT_RETURN || stmtOper == GT_ASG || stmtOper == GT_COMMA);
GenTree* treeWithCall;
if (stmtOper == GT_RETURN)
{
treeWithCall = stmtExpr->gtGetOp1();
}
else if (stmtOper == GT_COMMA)
{
// Second operation must be nop.
assert(stmtExpr->gtGetOp2()->IsNothingNode());
treeWithCall = stmtExpr->gtGetOp1();
}
else
{
treeWithCall = stmtExpr->gtGetOp2();
}
// Peel off casts
while (treeWithCall->gtOper == GT_CAST)
{
assert(!treeWithCall->gtOverflow());
treeWithCall = treeWithCall->gtGetOp1();
}
assert(treeWithCall == call);
}
#endif
// Store the call type for later to introduce the correct placeholder.
var_types origCallType = call->TypeGet();
GenTree* result;
if (!canFastTailCall && !tailCallViaJitHelper)
{
// For tailcall via CORINFO_TAILCALL_HELPERS we transform into regular
// calls with (to the JIT) regular control flow so we do not need to do
// much special handling.
result = fgMorphTailCallViaHelpers(call, tailCallHelpers);
}
else
{
// Otherwise we will transform into something that does not return. For
// fast tailcalls a "jump" and for tailcall via JIT helper a call to a
// JIT helper that does not return. So peel off everything after the
// call.
Statement* nextMorphStmt = fgMorphStmt->GetNextStmt();
JITDUMP("Remove all stmts after the call.\n");
while (nextMorphStmt != nullptr)
{
Statement* stmtToRemove = nextMorphStmt;
nextMorphStmt = stmtToRemove->GetNextStmt();
fgRemoveStmt(compCurBB, stmtToRemove);
}
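// If the call was wrapped in another node (e.g. ASG, RET or CAST), make the call itself the
// statement root; isRootReplaced is used below to return a placeholder for the discarded wrapper.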
bool isRootReplaced = false;
GenTree* root = fgMorphStmt->GetRootNode();
if (root != call)
{
JITDUMP("Replace root node [%06d] with [%06d] tail call node.\n", dspTreeID(root), dspTreeID(call));
isRootReplaced = true;
fgMorphStmt->SetRootNode(call);
}
// Avoid potential extra work for the return (for example, vzeroupper)
call->gtType = TYP_VOID;
// The runtime requires that we perform a null check on the `this` argument before
// tail calling to a virtual dispatch stub. This requirement is a consequence of limitations
// in the runtime's ability to map an AV to a NullReferenceException if
// the AV occurs in a dispatch stub that has an unmanaged caller.
if (call->IsVirtualStub())
{
call->gtFlags |= GTF_CALL_NULLCHECK;
}
// Do some target-specific transformations (before we process the args,
// etc.) for the JIT helper case.
if (tailCallViaJitHelper)
{
fgMorphTailCallViaJitHelper(call);
// Force re-evaluating the argInfo. fgMorphTailCallViaJitHelper will modify the
// argument list, invalidating the argInfo.
call->fgArgInfo = nullptr;
}
// Tail call via JIT helper: The VM can't use return address hijacking
// if we're not going to return and the helper doesn't have enough info
// to safely poll, so we poll before the tail call, if the block isn't
// already safe. Since tail call via helper is a slow mechanism it
// doesn't matter whether we emit a GC poll. This is done to be in parity
// with Jit64. Also this avoids a GC info size increase if almost all
// methods are expected to be tail calls (e.g. F#).
//
// Note that we can avoid emitting GC-poll if we know that the current
// BB is dominated by a Gc-SafePoint block. But we don't have dominator
// info at this point. One option is to just add a placeholder node for
// GC-poll (e.g. GT_GCPOLL) here and remove it in lowering if the block
// is dominated by a GC-SafePoint. For now it is not clear whether
// optimizing slow tail calls is worth the effort. As a low cost check,
// we check whether the first and current basic blocks are
// GC-SafePoints.
//
// Fast Tail call as epilog+jmp - No need to insert GC-poll. Instead,
// fgSetBlockOrder() is going to mark the method as fully interruptible
// if the block containing this tail call is reachable without executing
// any call.
BasicBlock* curBlock = compCurBB;
if (canFastTailCall || (fgFirstBB->bbFlags & BBF_GC_SAFE_POINT) || (compCurBB->bbFlags & BBF_GC_SAFE_POINT) ||
(fgCreateGCPoll(GCPOLL_INLINE, compCurBB) == curBlock))
{
// We didn't insert a poll block, so we need to morph the call now
// (Normally it will get morphed when we get to the split poll block)
GenTree* temp = fgMorphCall(call);
noway_assert(temp == call);
}
// Fast tail call: in case of fast tail calls, we need a jmp epilog and
// hence mark it as BBJ_RETURN with BBF_JMP flag set.
noway_assert(compCurBB->bbJumpKind == BBJ_RETURN);
if (canFastTailCall)
{
compCurBB->bbFlags |= BBF_HAS_JMP;
}
else
{
// We call CORINFO_HELP_TAILCALL which does not return, so we will
// not need an epilogue.
compCurBB->bbJumpKind = BBJ_THROW;
}
if (isRootReplaced)
{
// We have replaced the root node of this stmt and deleted the rest,
// but we still have the deleted, dead nodes on the `fgMorph*` stack
// if the root node was an `ASG`, `RET` or `CAST`.
// Return a zero con node to exit morphing of the old trees without asserts
// and forbid POST_ORDER morphing doing something wrong with our call.
var_types callType;
if (varTypeIsStruct(origCallType))
{
CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
Compiler::structPassingKind howToReturnStruct;
callType = getReturnTypeForStruct(retClsHnd, call->GetUnmanagedCallConv(), &howToReturnStruct);
assert((howToReturnStruct != SPK_Unknown) && (howToReturnStruct != SPK_ByReference));
if (howToReturnStruct == SPK_ByValue)
{
callType = TYP_I_IMPL;
}
else if (howToReturnStruct == SPK_ByValueAsHfa || varTypeIsSIMD(callType))
{
callType = TYP_FLOAT;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
}
else
{
callType = origCallType;
}
assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
callType = genActualType(callType);
GenTree* zero = gtNewZeroConNode(callType);
result = fgMorphTree(zero);
}
else
{
result = call;
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code
// generation.
//
// Arguments:
// call - The call to transform
// helpers - The tailcall helpers provided by the runtime.
//
// Return Value:
// Returns the transformed node.
//
// Notes:
// This transforms
// GT_CALL
// {callTarget}
// {this}
// {args}
// into
// GT_COMMA
// GT_CALL StoreArgsStub
// {callTarget} (depending on flags provided by the runtime)
// {this} (as a regular arg)
// {args}
// GT_COMMA
// GT_CALL Dispatcher
// GT_ADDR ReturnAddress
// {CallTargetStub}
// GT_ADDR ReturnValue
// GT_LCL ReturnValue
// whenever the call node returns a value. If the call node does not return a
// value the last comma will not be there.
//
GenTree* Compiler::fgMorphTailCallViaHelpers(GenTreeCall* call, CORINFO_TAILCALL_HELPERS& help)
{
// R2R requires different handling but we don't support tailcall via
// helpers in R2R yet, so just leave it for now.
// TODO: R2R: TailCallViaHelper
assert(!opts.IsReadyToRun());
JITDUMP("fgMorphTailCallViaHelpers (before):\n");
DISPTREE(call);
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
// We only take this route for tail prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// We might or might not have called fgInitArgInfo before this point: in
// builds with FEATURE_FASTTAILCALL we will have called it when checking if
// we could do a fast tailcall, so it is possible we have added extra IR
// for non-standard args that we must get rid of. Get rid of that IR here
// and do this first as it will 'expose' the retbuf as the first arg, which
// we rely upon in fgCreateCallDispatcherAndGetResult.
call->ResetArgInfo();
GenTree* callDispatcherAndGetResult = fgCreateCallDispatcherAndGetResult(call, help.hCallTarget, help.hDispatcher);
// Change the call to a call to the StoreArgs stub.
if (call->HasRetBufArg())
{
JITDUMP("Removing retbuf");
call->gtCallArgs = call->gtCallArgs->GetNext();
call->gtCallMoreFlags &= ~GTF_CALL_M_RETBUFFARG;
}
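    // If the runtime set CORINFO_TAILCALL_STORE_TARGET, the StoreArgs stub expects the call target
    // to be passed as an extra, final argument; it is appended to the arg list further below.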
const bool stubNeedsTargetFnPtr = (help.flags & CORINFO_TAILCALL_STORE_TARGET) != 0;
GenTree* doBeforeStoreArgsStub = nullptr;
GenTree* thisPtrStubArg = nullptr;
// Put 'this' in normal param list
if (call->gtCallThisArg != nullptr)
{
JITDUMP("Moving this pointer into arg list\n");
GenTree* objp = call->gtCallThisArg->GetNode();
GenTree* thisPtr = nullptr;
call->gtCallThisArg = nullptr;
// JIT will need one or two copies of "this" in the following cases:
// 1) the call needs null check;
// 2) StoreArgs stub needs the target function pointer address and if the call is virtual
        //    the stub also needs "this" in order to evaluate the target.
const bool callNeedsNullCheck = call->NeedsNullCheck();
const bool stubNeedsThisPtr = stubNeedsTargetFnPtr && call->IsVirtual();
        // TODO-Review: The following transformation is implemented under the assumption that
        // both conditions can be true. However, we could not construct an example
        // where a virtual tail call would require a null check. If the conditions turn out to be
        // mutually exclusive, the following could be simplified.
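        // For illustration, when both conditions hold we end up with roughly:
        //   doBeforeStoreArgsStub = COMMA(tmp = "this", NULLCHECK(tmp))
        // and 'tmp' is then used both as the regular "this" argument and as the stub's this pointer.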
if (callNeedsNullCheck || stubNeedsThisPtr)
{
// Clone "this" if "this" has no side effects.
if ((objp->gtFlags & GTF_SIDE_EFFECT) == 0)
{
thisPtr = gtClone(objp, true);
}
// Create a temp and spill "this" to the temp if "this" has side effects or "this" was too complex to clone.
if (thisPtr == nullptr)
{
const unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
// tmp = "this"
doBeforeStoreArgsStub = gtNewTempAssign(lclNum, objp);
if (callNeedsNullCheck)
{
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, objp->TypeGet());
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
doBeforeStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, nullcheck);
}
thisPtr = gtNewLclvNode(lclNum, objp->TypeGet());
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtNewLclvNode(lclNum, objp->TypeGet());
}
}
else
{
if (callNeedsNullCheck)
{
// deref("this")
doBeforeStoreArgsStub = gtNewNullCheck(objp, compCurBB);
if (stubNeedsThisPtr)
{
thisPtrStubArg = gtClone(objp, true);
}
}
else
{
assert(stubNeedsThisPtr);
thisPtrStubArg = objp;
}
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
assert((thisPtrStubArg != nullptr) == stubNeedsThisPtr);
}
else
{
thisPtr = objp;
}
// During rationalization tmp="this" and null check will be materialized
// in the right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// We may need to pass the target, for instance for calli or generic methods
    // where we pass an instantiating stub.
if (stubNeedsTargetFnPtr)
{
JITDUMP("Adding target since VM requested it\n");
GenTree* target;
if (!call->IsVirtual())
{
if (call->gtCallType == CT_INDIRECT)
{
noway_assert(call->gtCallAddr != nullptr);
target = call->gtCallAddr;
}
else
{
CORINFO_CONST_LOOKUP addrInfo;
info.compCompHnd->getFunctionEntryPoint(call->gtCallMethHnd, &addrInfo);
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(addrInfo.accessType != IAT_PPVALUE && addrInfo.accessType != IAT_RELPVALUE);
if (addrInfo.accessType == IAT_VALUE)
{
handle = addrInfo.handle;
}
else if (addrInfo.accessType == IAT_PVALUE)
{
pIndirection = addrInfo.addr;
}
target = gtNewIconEmbHndNode(handle, pIndirection, GTF_ICON_FTN_ADDR, call->gtCallMethHnd);
}
}
else
{
assert(!call->tailCallInfo->GetSig()->hasTypeArg());
CORINFO_CALL_INFO callInfo;
unsigned flags = CORINFO_CALLINFO_LDFTN;
if (call->tailCallInfo->IsCallvirt())
{
flags |= CORINFO_CALLINFO_CALLVIRT;
}
eeGetCallInfo(call->tailCallInfo->GetToken(), nullptr, (CORINFO_CALLINFO_FLAGS)flags, &callInfo);
target = getVirtMethodPointerTree(thisPtrStubArg, call->tailCallInfo->GetToken(), &callInfo);
}
// Insert target as last arg
GenTreeCall::Use** newArgSlot = &call->gtCallArgs;
while (*newArgSlot != nullptr)
{
newArgSlot = &(*newArgSlot)->NextRef();
}
*newArgSlot = gtNewCallArgs(target);
}
// This is now a direct call to the store args stub and not a tailcall.
call->gtCallType = CT_USER_FUNC;
call->gtCallMethHnd = help.hStoreArgs;
call->gtFlags &= ~GTF_CALL_VIRT_KIND_MASK;
call->gtCallMoreFlags &= ~(GTF_CALL_M_TAILCALL | GTF_CALL_M_DELEGATE_INV | GTF_CALL_M_WRAPPER_DELEGATE_INV);
// The store-args stub returns no value.
call->gtRetClsHnd = nullptr;
call->gtType = TYP_VOID;
call->gtReturnType = TYP_VOID;
GenTree* callStoreArgsStub = call;
if (doBeforeStoreArgsStub != nullptr)
{
callStoreArgsStub = gtNewOperNode(GT_COMMA, TYP_VOID, doBeforeStoreArgsStub, callStoreArgsStub);
}
GenTree* finalTree =
gtNewOperNode(GT_COMMA, callDispatcherAndGetResult->TypeGet(), callStoreArgsStub, callDispatcherAndGetResult);
finalTree = fgMorphTree(finalTree);
JITDUMP("fgMorphTailCallViaHelpers (after):\n");
DISPTREE(finalTree);
return finalTree;
}
//------------------------------------------------------------------------
// fgCreateCallDispatcherAndGetResult: Given a call
// CALL
// {callTarget}
// {retbuf}
// {this}
// {args}
// create a similarly typed node that calls the tailcall dispatcher and returns
// the result, as in the following:
// COMMA
// CALL TailCallDispatcher
// ADDR ReturnAddress
// &CallTargetFunc
// ADDR RetValue
// RetValue
// If the call has type TYP_VOID, only create the CALL node.
//
// Arguments:
// origCall - the call
// callTargetStubHnd - the handle of the CallTarget function (this is a special
// IL stub created by the runtime)
// dispatcherHnd - the handle of the tailcall dispatcher function
//
// Return Value:
// A node that can be used in place of the original call.
//
GenTree* Compiler::fgCreateCallDispatcherAndGetResult(GenTreeCall* origCall,
CORINFO_METHOD_HANDLE callTargetStubHnd,
CORINFO_METHOD_HANDLE dispatcherHnd)
{
GenTreeCall* callDispatcherNode =
gtNewCallNode(CT_USER_FUNC, dispatcherHnd, TYP_VOID, nullptr, fgMorphStmt->GetDebugInfo());
// The dispatcher has signature
// void DispatchTailCalls(void* callersRetAddrSlot, void* callTarget, void* retValue)
// Add return value arg.
GenTree* retValArg;
GenTree* retVal = nullptr;
unsigned int newRetLcl = BAD_VAR_NUM;
GenTree* copyToRetBufNode = nullptr;
if (origCall->HasRetBufArg())
{
JITDUMP("Transferring retbuf\n");
GenTree* retBufArg = origCall->gtCallArgs->GetNode();
assert(info.compRetBuffArg != BAD_VAR_NUM);
assert(retBufArg->OperIsLocal());
assert(retBufArg->AsLclVarCommon()->GetLclNum() == info.compRetBuffArg);
        // The caller's return buffer argument retBufArg can point to the GC heap while the dispatcher expects
// the return value argument retValArg to point to the stack.
// We use a temporary stack allocated return buffer to hold the value during the dispatcher call
// and copy the value back to the caller return buffer after that.
unsigned int tmpRetBufNum = lvaGrabTemp(true DEBUGARG("substitute local for return buffer"));
constexpr bool unsafeValueClsCheck = false;
lvaSetStruct(tmpRetBufNum, origCall->gtRetClsHnd, unsafeValueClsCheck);
lvaSetVarAddrExposed(tmpRetBufNum DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
var_types tmpRetBufType = lvaGetDesc(tmpRetBufNum)->TypeGet();
retValArg = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(tmpRetBufNum, tmpRetBufType));
var_types callerRetBufType = lvaGetDesc(info.compRetBuffArg)->TypeGet();
GenTree* dstAddr = gtNewLclvNode(info.compRetBuffArg, callerRetBufType);
GenTree* dst = gtNewObjNode(info.compMethodInfo->args.retTypeClass, dstAddr);
GenTree* src = gtNewLclvNode(tmpRetBufNum, tmpRetBufType);
constexpr bool isVolatile = false;
constexpr bool isCopyBlock = true;
copyToRetBufNode = gtNewBlkOpNode(dst, src, isVolatile, isCopyBlock);
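        // The dispatcher writes the result into the stack-allocated temp buffer; copyToRetBufNode then
        // copies it back into the caller-supplied return buffer, roughly: OBJ(callerRetBuf) = tmpRetBuf.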
if (origCall->gtType != TYP_VOID)
{
retVal = gtClone(retBufArg);
}
}
else if (origCall->gtType != TYP_VOID)
{
JITDUMP("Creating a new temp for the return value\n");
newRetLcl = lvaGrabTemp(false DEBUGARG("Return value for tail call dispatcher"));
if (varTypeIsStruct(origCall->gtType))
{
lvaSetStruct(newRetLcl, origCall->gtRetClsHnd, false);
}
else
{
// Since we pass a reference to the return value to the dispatcher
// we need to use the real return type so we can normalize it on
// load when we return it.
lvaTable[newRetLcl].lvType = (var_types)origCall->gtReturnType;
}
lvaSetVarAddrExposed(newRetLcl DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
retValArg =
gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType)));
retVal = gtNewLclvNode(newRetLcl, genActualType(lvaTable[newRetLcl].lvType));
if (varTypeIsStruct(origCall->gtType))
{
retVal = impFixupStructReturnType(retVal, origCall->gtRetClsHnd, origCall->GetUnmanagedCallConv());
}
}
else
{
JITDUMP("No return value so using null pointer as arg\n");
retValArg = gtNewZeroConNode(TYP_I_IMPL);
}
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retValArg, callDispatcherNode->gtCallArgs);
// Add callTarget
callDispatcherNode->gtCallArgs =
gtPrependNewCallArg(new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, callTargetStubHnd),
callDispatcherNode->gtCallArgs);
// Add the caller's return address slot.
if (lvaRetAddrVar == BAD_VAR_NUM)
{
lvaRetAddrVar = lvaGrabTemp(false DEBUGARG("Return address"));
lvaTable[lvaRetAddrVar].lvType = TYP_I_IMPL;
lvaSetVarAddrExposed(lvaRetAddrVar DEBUGARG(AddressExposedReason::DISPATCH_RET_BUF));
}
GenTree* retAddrSlot = gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaRetAddrVar, TYP_I_IMPL));
callDispatcherNode->gtCallArgs = gtPrependNewCallArg(retAddrSlot, callDispatcherNode->gtCallArgs);
GenTree* finalTree = callDispatcherNode;
if (copyToRetBufNode != nullptr)
{
finalTree = gtNewOperNode(GT_COMMA, TYP_VOID, callDispatcherNode, copyToRetBufNode);
}
if (origCall->gtType == TYP_VOID)
{
return finalTree;
}
assert(retVal != nullptr);
finalTree = gtNewOperNode(GT_COMMA, origCall->TypeGet(), finalTree, retVal);
// The JIT seems to want to CSE this comma and messes up multi-reg ret
// values in the process. Just avoid CSE'ing this tree entirely in that
// case.
if (origCall->HasMultiRegRetVal())
{
finalTree->gtFlags |= GTF_DONT_CSE;
}
return finalTree;
}
//------------------------------------------------------------------------
// getLookupTree: get a lookup tree
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// handleFlags - flags to set on the result node
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the lookup tree
//
GenTree* Compiler::getLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
GenTreeFlags handleFlags,
void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
// No runtime lookup is required.
// Access is direct or memory-indirect (of a fixed address) reference
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
handle = pLookup->constLookup.handle;
}
else if (pLookup->constLookup.accessType == IAT_PVALUE)
{
pIndirection = pLookup->constLookup.addr;
}
return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}
return getRuntimeLookupTree(pResolvedToken, pLookup, compileTimeHandle);
}
//------------------------------------------------------------------------
// getRuntimeLookupTree: get a tree for a runtime lookup
//
// Arguments:
// pResolvedToken - resolved token of the call
// pLookup - the lookup to get the tree for
// compileTimeHandle - compile-time handle corresponding to the lookup
//
// Return Value:
// A node representing the runtime lookup tree
//
GenTree* Compiler::getRuntimeLookupTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_LOOKUP* pLookup,
void* compileTimeHandle)
{
assert(!compIsForInlining());
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// If pRuntimeLookup->indirections is equal to CORINFO_USEHELPER, it specifies that a run-time helper should be
// used; otherwise, it specifies the number of indirections via pRuntimeLookup->offsets array.
if ((pRuntimeLookup->indirections == CORINFO_USEHELPER) || pRuntimeLookup->testForNull ||
pRuntimeLookup->testForFixup)
{
// If the first condition is true, runtime lookup tree is available only via the run-time helper function.
// TODO-CQ If the second or third condition is true, we are always using the slow path since we can't
// introduce control flow at this point. See impRuntimeLookupToTree for the logic to avoid calling the helper.
// The long-term solution is to introduce a new node representing a runtime lookup, create instances
// of that node both in the importer and here, and expand the node in lower (introducing control flow if
// necessary).
return gtNewRuntimeLookupHelperCallNode(pRuntimeLookup,
getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind),
compileTimeHandle);
}
GenTree* result = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
ArrayStack<GenTree*> stmts(getAllocator(CMK_ArrayStack));
auto cloneTree = [&](GenTree** tree DEBUGARG(const char* reason)) -> GenTree* {
if (!((*tree)->gtFlags & GTF_GLOB_EFFECT))
{
GenTree* clone = gtClone(*tree, true);
if (clone)
{
return clone;
}
}
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
stmts.Push(gtNewTempAssign(temp, *tree));
*tree = gtNewLclvNode(temp, lvaGetActualType(temp));
return gtNewLclvNode(temp, lvaGetActualType(temp));
};
// Apply repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
{
GenTree* preInd = nullptr;
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
preInd = cloneTree(&result DEBUGARG("getRuntimeLookupTree indirectOffset"));
}
if (i != 0)
{
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, preInd, result);
}
if (pRuntimeLookup->offsets[i] != 0)
{
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
}
}
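    // For illustration, with two indirections, non-zero offsets and no indirect offsets, the tree built
    // so far is roughly ADD(IND(ADD(ctx, offsets[0])), offsets[1]); the final IND is added just below.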
assert(!pRuntimeLookup->testForNull);
if (pRuntimeLookup->indirections > 0)
{
assert(!pRuntimeLookup->testForFixup);
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result);
result->gtFlags |= GTF_IND_NONFAULTING;
}
// Produces GT_COMMA(stmt1, GT_COMMA(stmt2, ... GT_COMMA(stmtN, result)))
while (!stmts.Empty())
{
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, stmts.Pop(), result);
}
DISPTREE(result);
return result;
}
//------------------------------------------------------------------------
// getVirtMethodPointerTree: get a tree for a virtual method pointer
//
// Arguments:
// thisPtr - tree representing `this` pointer
// pResolvedToken - pointer to the resolved token of the method
// pCallInfo - pointer to call info
//
// Return Value:
// A node representing the virtual method pointer
GenTree* Compiler::getVirtMethodPointerTree(GenTree* thisPtr,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_CALL_INFO* pCallInfo)
{
GenTree* exactTypeDesc = getTokenHandleTree(pResolvedToken, true);
GenTree* exactMethodDesc = getTokenHandleTree(pResolvedToken, false);
GenTreeCall::Use* helpArgs = gtNewCallArgs(thisPtr, exactTypeDesc, exactMethodDesc);
return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
}
//------------------------------------------------------------------------
// getTokenHandleTree: get a handle tree for a token
//
// Arguments:
// pResolvedToken - token to get a handle for
// parent - whether parent should be imported
//
// Return Value:
// A node representing the virtual method pointer
GenTree* Compiler::getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent)
{
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->embedGenericHandle(pResolvedToken, parent, &embedInfo);
GenTree* result = getLookupTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
embedInfo.compileTimeHandle);
// If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
{
result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
}
return result;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for tail call via JIT helper.
*/
void Compiler::fgMorphTailCallViaJitHelper(GenTreeCall* call)
{
JITDUMP("fgMorphTailCallViaJitHelper (before):\n");
DISPTREE(call);
// For the helper-assisted tail calls, we need to push all the arguments
// into a single list, and then add a few extra at the beginning or end.
//
// For x86, the tailcall helper is defined as:
//
// JIT_TailCall(<function args>, int numberOfOldStackArgsWords, int numberOfNewStackArgsWords, int flags, void*
// callTarget)
//
// Note that the special arguments are on the stack, whereas the function arguments follow
// the normal convention: there might be register arguments in ECX and EDX. The stack will
// look like (highest address at the top):
// first normal stack argument
// ...
// last normal stack argument
// numberOfOldStackArgs
// numberOfNewStackArgs
// flags
// callTarget
//
// Each special arg is 4 bytes.
//
// 'flags' is a bitmask where:
// 1 == restore callee-save registers (EDI,ESI,EBX). The JIT always saves all
// callee-saved registers for tailcall functions. Note that the helper assumes
// that the callee-saved registers live immediately below EBP, and must have been
// pushed in this order: EDI, ESI, EBX.
// 2 == call target is a virtual stub dispatch.
//
// The x86 tail call helper lives in VM\i386\jithelp.asm. See that function for more details
// on the custom calling convention.
// Check for PInvoke call types that we don't handle in codegen yet.
assert(!call->IsUnmanaged());
assert(call->IsVirtual() || (call->gtCallType != CT_INDIRECT) || (call->gtCallCookie == nullptr));
// Don't support tail calling helper methods
assert(call->gtCallType != CT_HELPER);
    // We only come down this route for tail-prefixed calls that cannot be dispatched as
// fast tail calls
assert(!call->IsImplicitTailCall());
// We want to use the following assert, but it can modify the IR in some cases, so we
// can't do that in an assert.
// assert(!fgCanFastTailCall(call, nullptr));
// First move the 'this' pointer (if any) onto the regular arg list. We do this because
// we are going to prepend special arguments onto the argument list (for non-x86 platforms),
// and thus shift where the 'this' pointer will be passed to a later argument slot. In
// addition, for all platforms, we are going to change the call into a helper call. Our code
// generation code for handling calls to helpers does not handle 'this' pointers. So, when we
// do this transformation, we must explicitly create a null 'this' pointer check, if required,
// since special 'this' pointer handling will no longer kick in.
//
// Some call types, such as virtual vtable calls, require creating a call address expression
// that involves the "this" pointer. Lowering will sometimes create an embedded statement
// to create a temporary that is assigned to the "this" pointer expression, and then use
// that temp to create the call address expression. This temp creation embedded statement
// will occur immediately before the "this" pointer argument, and then will be used for both
// the "this" pointer argument as well as the call address expression. In the normal ordering,
// the embedded statement establishing the "this" pointer temp will execute before both uses
// of the temp. However, for tail calls via a helper, we move the "this" pointer onto the
// normal call argument list, and insert a placeholder which will hold the call address
// expression. For non-x86, things are ok, because the order of execution of these is not
// altered. However, for x86, the call address expression is inserted as the *last* argument
// in the argument list, *after* the "this" pointer. It will be put on the stack, and be
// evaluated first. To ensure we don't end up with out-of-order temp definition and use,
// for those cases where call lowering creates an embedded form temp of "this", we will
// create a temp here, early, that will later get morphed correctly.
if (call->gtCallThisArg != nullptr)
{
GenTree* thisPtr = nullptr;
GenTree* objp = call->gtCallThisArg->GetNode();
call->gtCallThisArg = nullptr;
if ((call->IsDelegateInvoke() || call->IsVirtualVtable()) && !objp->OperIs(GT_LCL_VAR))
{
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", tmp)
var_types vt = objp->TypeGet();
GenTree* tmp = gtNewLclvNode(lclNum, vt);
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, tmp);
objp = thisPtr;
}
if (call->NeedsNullCheck())
{
// clone "this" if "this" has no side effects.
if ((thisPtr == nullptr) && !(objp->gtFlags & GTF_SIDE_EFFECT))
{
thisPtr = gtClone(objp, true);
}
var_types vt = objp->TypeGet();
if (thisPtr == nullptr)
{
// create a temp if either "this" has side effects or "this" is too complex to clone.
// tmp = "this"
unsigned lclNum = lvaGrabTemp(true DEBUGARG("tail call thisptr"));
GenTree* asg = gtNewTempAssign(lclNum, objp);
// COMMA(tmp = "this", deref(tmp))
GenTree* tmp = gtNewLclvNode(lclNum, vt);
GenTree* nullcheck = gtNewNullCheck(tmp, compCurBB);
asg = gtNewOperNode(GT_COMMA, TYP_VOID, asg, nullcheck);
// COMMA(COMMA(tmp = "this", deref(tmp)), tmp)
thisPtr = gtNewOperNode(GT_COMMA, vt, asg, gtNewLclvNode(lclNum, vt));
}
else
{
// thisPtr = COMMA(deref("this"), "this")
GenTree* nullcheck = gtNewNullCheck(thisPtr, compCurBB);
thisPtr = gtNewOperNode(GT_COMMA, vt, nullcheck, gtClone(objp, true));
}
call->gtFlags &= ~GTF_CALL_NULLCHECK;
}
else
{
thisPtr = objp;
}
// TODO-Cleanup: we leave it as a virtual stub call to
// use logic in `LowerVirtualStubCall`, clear GTF_CALL_VIRT_KIND_MASK here
// and change `LowerCall` to recognize it as a direct call.
// During rationalization tmp="this" and null check will
        // materialize as embedded stmts in the right execution order.
assert(thisPtr != nullptr);
call->gtCallArgs = gtPrependNewCallArg(thisPtr, call->gtCallArgs);
}
// Find the end of the argument list. ppArg will point at the last pointer; setting *ppArg will
// append to the list.
GenTreeCall::Use** ppArg = &call->gtCallArgs;
for (GenTreeCall::Use& use : call->Args())
{
ppArg = &use.NextRef();
}
assert(ppArg != nullptr);
assert(*ppArg == nullptr);
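    // Compute the number of incoming stack argument slots: compArgSize covers all arguments, so the
    // register-passed portion is subtracted out before dividing by the word size.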
unsigned nOldStkArgsWords =
(compArgSize - (codeGen->intRegState.rsCalleeRegArgCount * REGSIZE_BYTES)) / REGSIZE_BYTES;
GenTree* arg3 = gtNewIconNode((ssize_t)nOldStkArgsWords, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg3); // numberOfOldStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the count of outgoing stack arguments that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg2 = gtNewIconNode(9, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg2); // numberOfNewStackArgs
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the flags.
// The constant will be replaced.
GenTree* arg1 = gtNewIconNode(8, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg1);
ppArg = &((*ppArg)->NextRef());
// Inject a placeholder for the real call target that the Lowering phase will generate.
// The constant will be replaced.
GenTree* arg0 = gtNewIconNode(7, TYP_I_IMPL);
*ppArg = gtNewCallArgs(arg0);
// It is now a varargs tail call.
call->gtCallMoreFlags |= GTF_CALL_M_VARARGS;
call->gtFlags &= ~GTF_CALL_POP_ARGS;
// The function is responsible for doing explicit null check when it is necessary.
assert(!call->NeedsNullCheck());
JITDUMP("fgMorphTailCallViaJitHelper (after):\n");
DISPTREE(call);
}
//------------------------------------------------------------------------
// fgGetStubAddrArg: Return the virtual stub address for the given call.
//
// Notes:
// the JIT must place the address of the stub used to load the call target,
// the "stub indirection cell", in special call argument with special register.
//
// Arguments:
// call - a call that needs virtual stub dispatching.
//
// Return Value:
//    addr tree with set register requirements.
//
GenTree* Compiler::fgGetStubAddrArg(GenTreeCall* call)
{
assert(call->IsVirtualStub());
GenTree* stubAddrArg;
if (call->gtCallType == CT_INDIRECT)
{
stubAddrArg = gtClone(call->gtCallAddr, true);
}
else
{
assert(call->gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT);
ssize_t addr = ssize_t(call->gtStubCallStubAddr);
stubAddrArg = gtNewIconHandleNode(addr, GTF_ICON_FTN_ADDR);
#ifdef DEBUG
stubAddrArg->AsIntCon()->gtTargetHandle = (size_t)call->gtCallMethHnd;
#endif
}
assert(stubAddrArg != nullptr);
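    // The stub address must end up in the architecture-specific virtual stub parameter register;
    // record that requirement on the node.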
stubAddrArg->SetRegNum(virtualStubParamInfo->GetReg());
return stubAddrArg;
}
//------------------------------------------------------------------------------
// fgGetArgTabEntryParameterLclNum : Get the lcl num for the parameter that
// corresponds to the argument to a recursive call.
//
// Notes:
// Due to non-standard args this is not just fgArgTabEntry::argNum.
// For example, in R2R compilations we will have added a non-standard
// arg for the R2R indirection cell.
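//    For example, if one late-added non-standard arg precedes this one in the arg table,
//    the returned parameter lcl num is argTabEntry->argNum - 1.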
//
// Arguments:
// argTabEntry - the arg
//
unsigned Compiler::fgGetArgTabEntryParameterLclNum(GenTreeCall* call, fgArgTabEntry* argTabEntry)
{
fgArgInfo* argInfo = call->fgArgInfo;
unsigned argCount = argInfo->ArgCount();
fgArgTabEntry** argTable = argInfo->ArgTable();
unsigned numToRemove = 0;
for (unsigned i = 0; i < argCount; i++)
{
fgArgTabEntry* arg = argTable[i];
// Late added args add extra args that do not map to IL parameters and that we should not reassign.
if (!arg->isNonStandard() || !arg->isNonStandardArgAddedLate())
continue;
if (arg->argNum < argTabEntry->argNum)
numToRemove++;
}
return argTabEntry->argNum - numToRemove;
}
//------------------------------------------------------------------------------
// fgMorphRecursiveFastTailCallIntoLoop : Transform a recursive fast tail call into a loop.
//
//
// Arguments:
// block - basic block ending with a recursive fast tail call
// recursiveTailCall - recursive tail call to transform
//
// Notes:
// The legality of the transformation is ensured by the checks in endsWithTailCallConvertibleToLoop.
void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall)
{
assert(recursiveTailCall->IsTailCallConvertibleToLoop());
Statement* lastStmt = block->lastStmt();
assert(recursiveTailCall == lastStmt->GetRootNode());
// Transform recursive tail call into a loop.
Statement* earlyArgInsertionPoint = lastStmt;
const DebugInfo& callDI = lastStmt->GetDebugInfo();
// Hoist arg setup statement for the 'this' argument.
GenTreeCall::Use* thisArg = recursiveTailCall->gtCallThisArg;
if ((thisArg != nullptr) && !thisArg->GetNode()->IsNothingNode() && !thisArg->GetNode()->IsArgPlaceHolderNode())
{
Statement* thisArgStmt = gtNewStmt(thisArg->GetNode(), callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, thisArgStmt);
}
// All arguments whose trees may involve caller parameter local variables need to be assigned to temps first;
// then the temps need to be assigned to the method parameters. This is done so that the caller
// parameters are not re-assigned before call arguments depending on them are evaluated.
// tmpAssignmentInsertionPoint and paramAssignmentInsertionPoint keep track of
// where the next temp or parameter assignment should be inserted.
// In the example below the first call argument (arg1 - 1) needs to be assigned to a temp first
// while the second call argument (const 1) doesn't.
// Basic block before tail recursion elimination:
// ***** BB04, stmt 1 (top level)
// [000037] ------------ * stmtExpr void (top level) (IL 0x00A...0x013)
// [000033] --C - G------ - \--* call void RecursiveMethod
// [000030] ------------ | / --* const int - 1
// [000031] ------------arg0 in rcx + --* +int
// [000029] ------------ | \--* lclVar int V00 arg1
// [000032] ------------arg1 in rdx \--* const int 1
//
//
// Basic block after tail recursion elimination :
// ***** BB04, stmt 1 (top level)
// [000051] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000030] ------------ | / --* const int - 1
// [000031] ------------ | / --* +int
// [000029] ------------ | | \--* lclVar int V00 arg1
// [000050] - A---------- \--* = int
// [000049] D------N---- \--* lclVar int V02 tmp0
//
// ***** BB04, stmt 2 (top level)
// [000055] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000052] ------------ | / --* lclVar int V02 tmp0
// [000054] - A---------- \--* = int
// [000053] D------N---- \--* lclVar int V00 arg0
// ***** BB04, stmt 3 (top level)
// [000058] ------------ * stmtExpr void (top level) (IL 0x00A... ? ? ? )
// [000032] ------------ | / --* const int 1
// [000057] - A---------- \--* = int
// [000056] D------N---- \--* lclVar int V01 arg1
Statement* tmpAssignmentInsertionPoint = lastStmt;
Statement* paramAssignmentInsertionPoint = lastStmt;
// Process early args. They may contain both setup statements for late args and actual args.
// Early args don't include 'this' arg. We need to account for that so that the call to gtArgEntryByArgNum
// below has the correct second argument.
int earlyArgIndex = (thisArg == nullptr) ? 0 : 1;
for (GenTreeCall::Use& use : recursiveTailCall->Args())
{
GenTree* earlyArg = use.GetNode();
if (!earlyArg->IsNothingNode() && !earlyArg->IsArgPlaceHolderNode())
{
if ((earlyArg->gtFlags & GTF_LATE_ARG) != 0)
{
// This is a setup node so we need to hoist it.
Statement* earlyArgStmt = gtNewStmt(earlyArg, callDI);
fgInsertStmtBefore(block, earlyArgInsertionPoint, earlyArgStmt);
}
else
{
// This is an actual argument that needs to be assigned to the corresponding caller parameter.
fgArgTabEntry* curArgTabEntry = gtArgEntryByArgNum(recursiveTailCall, earlyArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(earlyArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
}
}
earlyArgIndex++;
}
// Process late args.
int lateArgIndex = 0;
for (GenTreeCall::Use& use : recursiveTailCall->LateArgs())
{
// A late argument is an actual argument that needs to be assigned to the corresponding caller's parameter.
GenTree* lateArg = use.GetNode();
fgArgTabEntry* curArgTabEntry = gtArgEntryByLateArgIndex(recursiveTailCall, lateArgIndex);
// Late-added non-standard args are extra args that are not passed as locals, so skip those
if (!curArgTabEntry->isNonStandard() || !curArgTabEntry->isNonStandardArgAddedLate())
{
Statement* paramAssignStmt =
fgAssignRecursiveCallArgToCallerParam(lateArg, curArgTabEntry,
fgGetArgTabEntryParameterLclNum(recursiveTailCall,
curArgTabEntry),
block, callDI, tmpAssignmentInsertionPoint,
paramAssignmentInsertionPoint);
if ((tmpAssignmentInsertionPoint == lastStmt) && (paramAssignStmt != nullptr))
{
// All temp assignments will happen before the first param assignment.
tmpAssignmentInsertionPoint = paramAssignStmt;
}
}
lateArgIndex++;
}
// If the method has starg.s 0 or ldarga.s 0 a special local (lvaArg0Var) is created so that
// compThisArg stays immutable. Normally it's assigned in fgFirstBBScratch block. Since that
// block won't be in the loop (it's assumed to have no predecessors), we need to update the special local here.
if (!info.compIsStatic && (lvaArg0Var != info.compThisArg))
{
var_types thisType = lvaTable[info.compThisArg].TypeGet();
GenTree* arg0 = gtNewLclvNode(lvaArg0Var, thisType);
GenTree* arg0Assignment = gtNewAssignNode(arg0, gtNewLclvNode(info.compThisArg, thisType));
Statement* arg0AssignmentStmt = gtNewStmt(arg0Assignment, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, arg0AssignmentStmt);
}
// If compInitMem is set, we may need to zero-initialize some locals. Normally it's done in the prolog
// but this loop can't include the prolog. Since we don't have liveness information, we insert zero-initialization
// for all non-parameter IL locals as well as temp structs with GC fields.
// Liveness phase will remove unnecessary initializations.
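    // For a struct local the zero-init below is roughly ASG(BLK(lcl), 0); for a scalar local it is
    // simply ASG(lcl, 0).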
if (info.compInitMem || compSuppressedZeroInit)
{
unsigned varNum;
LclVarDsc* varDsc;
for (varNum = 0, varDsc = lvaTable; varNum < lvaCount; varNum++, varDsc++)
{
#if FEATURE_FIXED_OUT_ARGS
if (varNum == lvaOutgoingArgSpaceVar)
{
continue;
}
#endif // FEATURE_FIXED_OUT_ARGS
if (!varDsc->lvIsParam)
{
var_types lclType = varDsc->TypeGet();
bool isUserLocal = (varNum < info.compLocalsCount);
bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr());
bool hadSuppressedInit = varDsc->lvSuppressedZeroInit;
if ((info.compInitMem && (isUserLocal || structWithGCFields)) || hadSuppressedInit)
{
GenTree* lcl = gtNewLclvNode(varNum, lclType);
GenTree* init = nullptr;
if (varTypeIsStruct(lclType))
{
const bool isVolatile = false;
const bool isCopyBlock = false;
init = gtNewBlkOpNode(lcl, gtNewIconNode(0), isVolatile, isCopyBlock);
init = fgMorphInitBlock(init);
}
else
{
GenTree* zero = gtNewZeroConNode(genActualType(lclType));
init = gtNewAssignNode(lcl, zero);
}
Statement* initStmt = gtNewStmt(init, callDI);
fgInsertStmtBefore(block, lastStmt, initStmt);
}
}
}
}
// Remove the call
fgRemoveStmt(block, lastStmt);
// Set the loop edge.
if (opts.IsOSR())
{
// Todo: this may not look like a viable loop header.
// Might need the moral equivalent of a scratch BB.
block->bbJumpDest = fgEntryBB;
}
else
{
// Ensure we have a scratch block and then target the next
// block. Loop detection needs to see a pred out of the loop,
// so mark the scratch block BBF_DONT_REMOVE to prevent empty
// block removal on it.
fgEnsureFirstBBisScratch();
fgFirstBB->bbFlags |= BBF_DONT_REMOVE;
block->bbJumpDest = fgFirstBB->bbNext;
}
// Finish hooking things up.
block->bbJumpKind = BBJ_ALWAYS;
fgAddRefPred(block->bbJumpDest, block);
block->bbFlags &= ~BBF_HAS_JMP;
}
//------------------------------------------------------------------------------
// fgAssignRecursiveCallArgToCallerParam : Assign argument to a recursive call to the corresponding caller parameter.
//
//
// Arguments:
// arg - argument to assign
// argTabEntry - argument table entry corresponding to arg
// lclParamNum - the lcl num of the parameter
// block --- basic block the call is in
// callILOffset - IL offset of the call
// tmpAssignmentInsertionPoint - tree before which temp assignment should be inserted (if necessary)
// paramAssignmentInsertionPoint - tree before which parameter assignment should be inserted
//
// Return Value:
// parameter assignment statement if one was inserted; nullptr otherwise.
Statement* Compiler::fgAssignRecursiveCallArgToCallerParam(GenTree* arg,
fgArgTabEntry* argTabEntry,
unsigned lclParamNum,
BasicBlock* block,
const DebugInfo& callDI,
Statement* tmpAssignmentInsertionPoint,
Statement* paramAssignmentInsertionPoint)
{
// Call arguments should be assigned to temps first and then the temps should be assigned to parameters because
// some argument trees may reference parameters directly.
GenTree* argInTemp = nullptr;
bool needToAssignParameter = true;
// TODO-CQ: enable calls with struct arguments passed in registers.
noway_assert(!varTypeIsStruct(arg->TypeGet()));
if ((argTabEntry->isTmp) || arg->IsCnsIntOrI() || arg->IsCnsFltOrDbl())
{
// The argument is already assigned to a temp or is a const.
argInTemp = arg;
}
else if (arg->OperGet() == GT_LCL_VAR)
{
unsigned lclNum = arg->AsLclVar()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (!varDsc->lvIsParam)
{
// The argument is a non-parameter local so it doesn't need to be assigned to a temp.
argInTemp = arg;
}
else if (lclNum == lclParamNum)
{
// The argument is the same parameter local that we were about to assign so
// we can skip the assignment.
needToAssignParameter = false;
}
}
// TODO: We don't need temp assignments if we can prove that the argument tree doesn't involve
// any caller parameters. Some common cases are handled above but we may be able to eliminate
// more temp assignments.
Statement* paramAssignStmt = nullptr;
if (needToAssignParameter)
{
if (argInTemp == nullptr)
{
// The argument is not assigned to a temp. We need to create a new temp and insert an assignment.
// TODO: we can avoid a temp assignment if we can prove that the argument tree
// doesn't involve any caller parameters.
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("arg temp"));
lvaTable[tmpNum].lvType = arg->gtType;
GenTree* tempSrc = arg;
GenTree* tempDest = gtNewLclvNode(tmpNum, tempSrc->gtType);
GenTree* tmpAssignNode = gtNewAssignNode(tempDest, tempSrc);
Statement* tmpAssignStmt = gtNewStmt(tmpAssignNode, callDI);
fgInsertStmtBefore(block, tmpAssignmentInsertionPoint, tmpAssignStmt);
argInTemp = gtNewLclvNode(tmpNum, tempSrc->gtType);
}
// Now assign the temp to the parameter.
const LclVarDsc* paramDsc = lvaGetDesc(lclParamNum);
assert(paramDsc->lvIsParam);
GenTree* paramDest = gtNewLclvNode(lclParamNum, paramDsc->lvType);
GenTree* paramAssignNode = gtNewAssignNode(paramDest, argInTemp);
paramAssignStmt = gtNewStmt(paramAssignNode, callDI);
fgInsertStmtBefore(block, paramAssignmentInsertionPoint, paramAssignStmt);
}
return paramAssignStmt;
}
/*****************************************************************************
*
* Transform the given GT_CALL tree for code generation.
*/
GenTree* Compiler::fgMorphCall(GenTreeCall* call)
{
if (call->CanTailCall())
{
GenTree* newNode = fgMorphPotentialTailCall(call);
if (newNode != nullptr)
{
return newNode;
}
assert(!call->CanTailCall());
#if FEATURE_MULTIREG_RET
if (fgGlobalMorph && call->HasMultiRegRetVal() && varTypeIsStruct(call->TypeGet()))
{
// The tail call has been rejected so we must finish the work deferred
// by impFixupCallStructReturn for multi-reg-returning calls and transform
// ret call
// into
// temp = call
// ret temp
// Force re-evaluating the argInfo as the return argument has changed.
call->ResetArgInfo();
// Create a new temp.
unsigned tmpNum =
lvaGrabTemp(false DEBUGARG("Return value temp for multi-reg return (rejected tail call)."));
lvaTable[tmpNum].lvIsMultiRegRet = true;
CORINFO_CLASS_HANDLE structHandle = call->gtRetClsHnd;
assert(structHandle != NO_CLASS_HANDLE);
const bool unsafeValueClsCheck = false;
lvaSetStruct(tmpNum, structHandle, unsafeValueClsCheck);
var_types structType = lvaTable[tmpNum].lvType;
GenTree* dst = gtNewLclvNode(tmpNum, structType);
GenTree* assg = gtNewAssignNode(dst, call);
assg = fgMorphTree(assg);
// Create the assignment statement and insert it before the current statement.
Statement* assgStmt = gtNewStmt(assg, compCurStmt->GetDebugInfo());
fgInsertStmtBefore(compCurBB, compCurStmt, assgStmt);
// Return the temp.
GenTree* result = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
result->gtFlags |= GTF_DONT_CSE;
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
#ifdef DEBUG
if (verbose)
{
printf("\nInserting assignment of a multi-reg call result to a temp:\n");
gtDispStmt(assgStmt);
}
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
return result;
}
#endif
}
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) == 0 &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_VIRTUAL_FUNC_PTR)
#ifdef FEATURE_READYTORUN
|| call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR)
#endif
) &&
(call == fgMorphStmt->GetRootNode()))
{
// This is call to CORINFO_HELP_VIRTUAL_FUNC_PTR with ignored result.
// Transform it into a null check.
GenTree* thisPtr = call->gtCallArgs->GetNode();
GenTree* nullCheck = gtNewNullCheck(thisPtr, compCurBB);
return fgMorphTree(nullCheck);
}
noway_assert(call->gtOper == GT_CALL);
//
// Only count calls once (only in the global morph phase)
//
if (fgGlobalMorph)
{
if (call->gtCallType == CT_INDIRECT)
{
optCallCount++;
optIndirectCallCount++;
}
else if (call->gtCallType == CT_USER_FUNC)
{
optCallCount++;
if (call->IsVirtual())
{
optIndirectCallCount++;
}
}
}
// Couldn't inline - remember that this BB contains method calls
// Mark the block as a GC safe point for the call if possible.
// In the event the call indicates the block isn't a GC safe point
// and the call is unmanaged with a GC transition suppression request
// then insert a GC poll.
CLANG_FORMAT_COMMENT_ANCHOR;
if (IsGcSafePoint(call))
{
compCurBB->bbFlags |= BBF_GC_SAFE_POINT;
}
// Regardless of the state of the basic block with respect to GC safe point,
// we will always insert a GC Poll for scenarios involving a suppressed GC
// transition. Only mark the block for GC Poll insertion on the first morph.
if (fgGlobalMorph && call->IsUnmanaged() && call->IsSuppressGCTransition())
{
compCurBB->bbFlags |= (BBF_HAS_SUPPRESSGC_CALL | BBF_GC_SAFE_POINT);
optMethodFlags |= OMF_NEEDS_GCPOLLS;
}
// Morph Type.op_Equality, Type.op_Inequality, and Enum.HasFlag
//
// We need to do these before the arguments are morphed
if ((call->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC))
{
// See if this is foldable
GenTree* optTree = gtFoldExprCall(call);
// If we optimized, morph the result
if (optTree != call)
{
return fgMorphTree(optTree);
}
}
compCurBB->bbFlags |= BBF_HAS_CALL; // This block has a call
// Process the "normal" argument list
call = fgMorphArgs(call);
noway_assert(call->gtOper == GT_CALL);
// Assign DEF flags if it produces a definition from "return buffer".
fgAssignSetVarDef(call);
if (call->OperRequiresAsgFlag())
{
call->gtFlags |= GTF_ASG;
}
// Should we expand this virtual method call target early here?
//
if (call->IsExpandedEarly() && call->IsVirtualVtable())
{
// We only expand the Vtable Call target once in the global morph phase
if (fgGlobalMorph)
{
assert(call->gtControlExpr == nullptr); // We only call this method and assign gtControlExpr once
call->gtControlExpr = fgExpandVirtualVtableCallTarget(call);
}
// We always have to morph or re-morph the control expr
//
call->gtControlExpr = fgMorphTree(call->gtControlExpr);
// Propagate any gtFlags into the call
call->gtFlags |= call->gtControlExpr->gtFlags;
}
// Morph stelem.ref helper call to store a null value, into a store into an array without the helper.
// This needs to be done after the arguments are morphed to ensure constant propagation has already taken place.
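    // Storing a null reference can never fail the array covariance check, so the helper call can be
    // replaced with a plain array element store, roughly ASG(arr[index], null).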
if (opts.OptimizationEnabled() && (call->gtCallType == CT_HELPER) &&
(call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_ARRADDR_ST)))
{
GenTree* value = gtArgEntryByArgNum(call, 2)->GetNode();
if (value->IsIntegralConst(0))
{
assert(value->OperGet() == GT_CNS_INT);
GenTree* arr = gtArgEntryByArgNum(call, 0)->GetNode();
GenTree* index = gtArgEntryByArgNum(call, 1)->GetNode();
// Either or both of the array and index arguments may have been spilled to temps by `fgMorphArgs`. Copy
// the spill trees as well if necessary.
GenTreeOp* argSetup = nullptr;
for (GenTreeCall::Use& use : call->Args())
{
GenTree* const arg = use.GetNode();
if (arg->OperGet() != GT_ASG)
{
continue;
}
assert(arg != arr);
assert(arg != index);
arg->gtFlags &= ~GTF_LATE_ARG;
GenTree* op1 = argSetup;
if (op1 == nullptr)
{
op1 = gtNewNothingNode();
#if DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
argSetup = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, op1, arg);
#if DEBUG
argSetup->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
#ifdef DEBUG
auto resetMorphedFlag = [](GenTree** slot, fgWalkData* data) -> fgWalkResult {
(*slot)->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
return WALK_CONTINUE;
};
fgWalkTreePost(&arr, resetMorphedFlag);
fgWalkTreePost(&index, resetMorphedFlag);
fgWalkTreePost(&value, resetMorphedFlag);
#endif // DEBUG
GenTree* const arrIndexNode = gtNewIndexRef(TYP_REF, arr, index);
GenTree* const arrStore = gtNewAssignNode(arrIndexNode, value);
GenTree* result = fgMorphTree(arrStore);
if (argSetup != nullptr)
{
result = new (this, GT_COMMA) GenTreeOp(GT_COMMA, TYP_VOID, argSetup, result);
#if DEBUG
result->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return result;
}
}
if (call->IsNoReturn())
{
//
// If we know that the call does not return then we can set fgRemoveRestOfBlock
// to remove all subsequent statements and change the call's basic block to BBJ_THROW.
// As a result the compiler won't need to preserve live registers across the call.
//
        // This isn't needed for tail calls as there shouldn't be any code after the call anyway.
// Besides, the tail call code is part of the epilog and converting the block to
// BBJ_THROW would result in the tail call being dropped as the epilog is generated
// only for BBJ_RETURN blocks.
//
if (!call->IsTailCall())
{
fgRemoveRestOfBlock = true;
}
}
return call;
}
/*****************************************************************************
*
* Expand and return the call target address for a VirtualCall
* The code here should match that generated by LowerVirtualVtableCall
*/
GenTree* Compiler::fgExpandVirtualVtableCallTarget(GenTreeCall* call)
{
GenTree* result;
JITDUMP("Expanding virtual call target for %d.%s:\n", call->gtTreeID, GenTree::OpName(call->gtOper));
noway_assert(call->gtCallType == CT_USER_FUNC);
// get a reference to the thisPtr being passed
fgArgTabEntry* thisArgTabEntry = gtArgEntryByArgNum(call, 0);
GenTree* thisPtr = thisArgTabEntry->GetNode();
// fgMorphArgs must enforce this invariant by creating a temp
//
assert(thisPtr->OperIsLocal());
// Make a copy of the thisPtr by cloning
//
thisPtr = gtClone(thisPtr, true);
noway_assert(thisPtr != nullptr);
// Get hold of the vtable offset
unsigned vtabOffsOfIndirection;
unsigned vtabOffsAfterIndirection;
bool isRelative;
info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection,
&isRelative);
    // Dereference the this pointer to obtain the method table; it is called vtab below
GenTree* vtab;
assert(VPTR_OFFS == 0); // We have to add this value to the thisPtr to get the methodTable
vtab = gtNewOperNode(GT_IND, TYP_I_IMPL, thisPtr);
vtab->gtFlags |= GTF_IND_INVARIANT;
// Get the appropriate vtable chunk
if (vtabOffsOfIndirection != CORINFO_VIRTUALCALL_NO_CHUNK)
{
// Note this isRelative code path is currently never executed
// as the VM doesn't ever return: isRelative == true
//
if (isRelative)
{
// MethodTable offset is a relative pointer.
//
// Additional temporary variable is used to store virtual table pointer.
            // The address of the method is obtained by the following computations:
//
// Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
// vtable-1st-level-indirection):
// tmp = vtab
//
// Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
// result = [tmp + vtabOffsOfIndirection + vtabOffsAfterIndirection + [tmp + vtabOffsOfIndirection]]
//
//
// When isRelative is true we need to setup two temporary variables
// var1 = vtab
// var2 = var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
// result = [var2] + var2
//
unsigned varNum1 = lvaGrabTemp(true DEBUGARG("var1 - vtab"));
unsigned varNum2 = lvaGrabTemp(true DEBUGARG("var2 - relative"));
GenTree* asgVar1 = gtNewTempAssign(varNum1, vtab); // var1 = vtab
// [tmp + vtabOffsOfIndirection]
GenTree* tmpTree1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
tmpTree1 = gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree1, false);
tmpTree1->gtFlags |= GTF_IND_NONFAULTING;
tmpTree1->gtFlags |= GTF_IND_INVARIANT;
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection
GenTree* tmpTree2 =
gtNewOperNode(GT_ADD, TYP_I_IMPL, gtNewLclvNode(varNum1, TYP_I_IMPL),
gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_I_IMPL));
// var1 + vtabOffsOfIndirection + vtabOffsAfterIndirection + [var1 + vtabOffsOfIndirection]
tmpTree2 = gtNewOperNode(GT_ADD, TYP_I_IMPL, tmpTree2, tmpTree1);
GenTree* asgVar2 = gtNewTempAssign(varNum2, tmpTree2); // var2 = <expression>
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewLclvNode(varNum2, TYP_I_IMPL), false); // [var2]
result->gtFlags |= GTF_IND_NONFAULTING;
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewLclvNode(varNum2, TYP_I_IMPL)); // [var2] + var2
            // Now stitch together the two assignments and the calculation of result into a single tree
GenTree* commaTree = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar2, result);
result = gtNewOperNode(GT_COMMA, TYP_I_IMPL, asgVar1, commaTree);
}
else
{
// result = [vtab + vtabOffsOfIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtab, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
result->gtFlags |= GTF_IND_INVARIANT;
}
}
else
{
result = vtab;
assert(!isRelative);
}
if (!isRelative)
{
// Load the function address
// result = [result + vtabOffsAfterIndirection]
result = gtNewOperNode(GT_ADD, TYP_I_IMPL, result, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL));
// This last indirection is not invariant, but is non-faulting
result = gtNewOperNode(GT_IND, TYP_I_IMPL, result, false);
result->gtFlags |= GTF_IND_NONFAULTING;
}
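    // In the common case (chunked vtable, isRelative == false) the final tree is roughly:
    //   IND(ADD(IND(ADD(IND(this), vtabOffsOfIndirection)), vtabOffsAfterIndirection))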
return result;
}
/*****************************************************************************
*
* Transform the given constant tree for code generation.
*/
GenTree* Compiler::fgMorphConst(GenTree* tree)
{
assert(tree->OperIsConst());
/* Clear any exception flags or other unnecessary flags
* that may have been set before folding this node to a constant */
tree->gtFlags &= ~(GTF_ALL_EFFECT | GTF_REVERSE_OPS);
if (!tree->OperIs(GT_CNS_STR))
{
return tree;
}
if (tree->AsStrCon()->IsStringEmptyField())
{
LPVOID pValue;
InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
return fgMorphTree(gtNewStringLiteralNode(iat, pValue));
}
// TODO-CQ: Do this for compCurBB->isRunRarely(). Doing that currently will
// guarantee slow performance for that block. Instead cache the return value
    // of CORINFO_HELP_STRCNS and go to the cache first, giving reasonable perf.
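    // That is: for blocks we expect to execute rarely (currently BBJ_THROW blocks, or statements whose
    // root is a no-return/throw call) construct the string lazily via a helper instead of eagerly here.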
bool useLazyStrCns = false;
if (compCurBB->bbJumpKind == BBJ_THROW)
{
useLazyStrCns = true;
}
else if (fgGlobalMorph && compCurStmt->GetRootNode()->IsCall())
{
// Quick check: if the root node of the current statement happens to be a noreturn call.
GenTreeCall* call = compCurStmt->GetRootNode()->AsCall();
useLazyStrCns = call->IsNoReturn() || fgIsThrow(call);
}
if (useLazyStrCns)
{
CorInfoHelpFunc helper = info.compCompHnd->getLazyStringLiteralHelper(tree->AsStrCon()->gtScpHnd);
if (helper != CORINFO_HELP_UNDEF)
{
            // For unimportant blocks, we want to construct the string lazily
GenTreeCall::Use* args;
if (helper == CORINFO_HELP_STRCNS_CURRENT_MODULE)
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT));
}
else
{
args = gtNewCallArgs(gtNewIconNode(RidFromToken(tree->AsStrCon()->gtSconCPX), TYP_INT),
gtNewIconEmbScpHndNode(tree->AsStrCon()->gtScpHnd));
}
tree = gtNewHelperCallNode(helper, TYP_REF, args);
return fgMorphTree(tree);
}
}
assert(tree->AsStrCon()->gtScpHnd == info.compScopeHnd || !IsUninitialized(tree->AsStrCon()->gtScpHnd));
LPVOID pValue;
InfoAccessType iat =
info.compCompHnd->constructStringLiteral(tree->AsStrCon()->gtScpHnd, tree->AsStrCon()->gtSconCPX, &pValue);
tree = gtNewStringLiteralNode(iat, pValue);
return fgMorphTree(tree);
}
//------------------------------------------------------------------------
// fgMorphTryFoldObjAsLclVar: try to fold an Obj node as a LclVar.
//
// Arguments:
// obj - the obj node.
// destroyNodes -- destroy nodes that are optimized away
//
// Return value:
// GenTreeLclVar if the obj can be replaced by it, null otherwise.
//
// Notes:
// TODO-CQ: currently this transformation is done only under copy block,
//    but it is beneficial to do it for each OBJ node. However, `PUT_ARG_STACK`
// for some platforms does not expect struct `LCL_VAR` as a source, so
// it needs more work.
//
GenTreeLclVar* Compiler::fgMorphTryFoldObjAsLclVar(GenTreeObj* obj, bool destroyNodes)
{
if (opts.OptimizationEnabled())
{
GenTree* op1 = obj->Addr();
assert(!op1->OperIs(GT_LCL_VAR_ADDR) && "missed an opt opportunity");
if (op1->OperIs(GT_ADDR))
{
GenTreeUnOp* addr = op1->AsUnOp();
GenTree* addrOp = addr->gtGetOp1();
if (addrOp->TypeIs(obj->TypeGet()) && addrOp->OperIs(GT_LCL_VAR))
{
GenTreeLclVar* lclVar = addrOp->AsLclVar();
ClassLayout* lclVarLayout = lvaGetDesc(lclVar)->GetLayout();
ClassLayout* objLayout = obj->GetLayout();
if (ClassLayout::AreCompatible(lclVarLayout, objLayout))
{
#ifdef DEBUG
CORINFO_CLASS_HANDLE objClsHandle = obj->GetLayout()->GetClassHandle();
assert(objClsHandle != NO_CLASS_HANDLE);
if (verbose)
{
CORINFO_CLASS_HANDLE lclClsHnd = gtGetStructHandle(lclVar);
printf("fold OBJ(ADDR(X)) [%06u] into X [%06u], ", dspTreeID(obj), dspTreeID(lclVar));
printf("with %s handles\n", ((lclClsHnd == objClsHandle) ? "matching" : "different"));
}
#endif
// Keep the DONT_CSE flag in sync
// (as the addr always marks it for its op1)
lclVar->gtFlags &= ~GTF_DONT_CSE;
lclVar->gtFlags |= (obj->gtFlags & GTF_DONT_CSE);
if (destroyNodes)
{
DEBUG_DESTROY_NODE(obj);
DEBUG_DESTROY_NODE(addr);
}
return lclVar;
}
}
}
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_LEAF tree for code generation.
*/
GenTree* Compiler::fgMorphLeaf(GenTree* tree)
{
assert(tree->OperKind() & GTK_LEAF);
if (tree->gtOper == GT_LCL_VAR)
{
const bool forceRemorph = false;
return fgMorphLocalVar(tree, forceRemorph);
}
else if (tree->gtOper == GT_LCL_FLD)
{
if (lvaGetDesc(tree->AsLclFld())->IsAddressExposed())
{
tree->gtFlags |= GTF_GLOB_REF;
}
#ifdef TARGET_X86
if (info.compIsVarArgs)
{
GenTree* newTree = fgMorphStackArgForVarArgs(tree->AsLclFld()->GetLclNum(), tree->TypeGet(),
tree->AsLclFld()->GetLclOffs());
if (newTree != nullptr)
{
if (newTree->OperIsBlk() && ((tree->gtFlags & GTF_VAR_DEF) == 0))
{
newTree->SetOper(GT_IND);
}
return newTree;
}
}
#endif // TARGET_X86
}
else if (tree->gtOper == GT_FTN_ADDR)
{
GenTreeFptrVal* fptrValTree = tree->AsFptrVal();
// A function pointer address is being used. Let the VM know if this is the
// target of a Delegate or a raw function pointer.
bool isUnsafeFunctionPointer = !fptrValTree->gtFptrDelegateTarget;
CORINFO_CONST_LOOKUP addrInfo;
#ifdef FEATURE_READYTORUN
if (fptrValTree->gtEntryPoint.addr != nullptr)
{
addrInfo = fptrValTree->gtEntryPoint;
}
else
#endif
{
info.compCompHnd->getFunctionFixedEntryPoint(fptrValTree->gtFptrMethod, isUnsafeFunctionPointer, &addrInfo);
}
GenTree* indNode = nullptr;
switch (addrInfo.accessType)
{
case IAT_PPVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_CONST_PTR, true);
// Add the second indirection
indNode = gtNewOperNode(GT_IND, TYP_I_IMPL, indNode);
// This indirection won't cause an exception.
indNode->gtFlags |= GTF_IND_NONFAULTING;
// This indirection also is invariant.
indNode->gtFlags |= GTF_IND_INVARIANT;
break;
case IAT_PVALUE:
indNode = gtNewIndOfIconHandleNode(TYP_I_IMPL, (size_t)addrInfo.handle, GTF_ICON_FTN_ADDR, true);
break;
case IAT_VALUE:
// Refer to gtNewIconHandleNode() as the template for constructing a constant handle
//
tree->SetOper(GT_CNS_INT);
tree->AsIntConCommon()->SetIconValue(ssize_t(addrInfo.handle));
tree->gtFlags |= GTF_ICON_FTN_ADDR;
break;
default:
noway_assert(!"Unknown addrInfo.accessType");
}
if (indNode != nullptr)
{
DEBUG_DESTROY_NODE(tree);
tree = fgMorphTree(indNode);
}
}
return tree;
}
void Compiler::fgAssignSetVarDef(GenTree* tree)
{
GenTreeLclVarCommon* lclVarCmnTree;
bool isEntire = false;
if (tree->DefinesLocal(this, &lclVarCmnTree, &isEntire))
{
if (isEntire)
{
lclVarCmnTree->gtFlags |= GTF_VAR_DEF;
}
else
{
// We consider partial definitions to be modeled as uses followed by definitions.
// This captures the idea that preceding defs are not necessarily made redundant
// by this definition.
lclVarCmnTree->gtFlags |= (GTF_VAR_DEF | GTF_VAR_USEASG);
}
}
}
//------------------------------------------------------------------------
// fgMorphOneAsgBlockOp: Attempt to replace a block assignment with a scalar assignment
//
// Arguments:
// tree - The block assignment to be possibly morphed
//
// Return Value:
// The modified tree if successful, nullptr otherwise.
//
// Assumptions:
// 'tree' must be a block assignment.
//
// Notes:
// If successful, this method always returns the incoming tree, modifying only
// its arguments.
//
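// For example (illustrative only, not an exhaustive description of the handled cases): a register-sized
// copy such as ASG(BLK<4>(ADDR(LCL_VAR int V01)), IND struct(addr)) can be retyped into the scalar
// assignment ASG(LCL_VAR int V01, IND int(addr)).
//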
GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
{
// This must be a block assignment.
noway_assert(tree->OperIsBlkOp());
var_types asgType = tree->TypeGet();
GenTree* asg = tree;
GenTree* dest = asg->gtGetOp1();
GenTree* src = asg->gtGetOp2();
unsigned destVarNum = BAD_VAR_NUM;
LclVarDsc* destVarDsc = nullptr;
GenTree* destLclVarTree = nullptr;
bool isCopyBlock = asg->OperIsCopyBlkOp();
bool isInitBlock = !isCopyBlock;
unsigned size = 0;
CORINFO_CLASS_HANDLE clsHnd = NO_CLASS_HANDLE;
if (dest->gtEffectiveVal()->OperIsBlk())
{
GenTreeBlk* lhsBlk = dest->gtEffectiveVal()->AsBlk();
size = lhsBlk->Size();
if (impIsAddressInLocal(lhsBlk->Addr(), &destLclVarTree))
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
}
if (lhsBlk->OperGet() == GT_OBJ)
{
clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle();
}
}
else
{
// Is this an enregisterable struct that is already a simple assignment?
// This can happen if we are re-morphing.
// Note that we won't do this straightaway if this is a SIMD type, since it
// may be a promoted lclVar (sometimes we promote the individual float fields of
// fixed-size SIMD).
if (dest->OperGet() == GT_IND)
{
noway_assert(asgType != TYP_STRUCT);
if (varTypeIsStruct(asgType))
{
destLclVarTree = fgIsIndirOfAddrOfLocal(dest);
}
if (isCopyBlock && destLclVarTree == nullptr && !src->OperIs(GT_LCL_VAR))
{
fgMorphBlockOperand(src, asgType, genTypeSize(asgType), false /*isBlkReqd*/);
dest->gtFlags |= GTF_DONT_CSE;
return tree;
}
}
else
{
noway_assert(dest->OperIsLocal());
destLclVarTree = dest;
}
if (destLclVarTree != nullptr)
{
destVarNum = destLclVarTree->AsLclVarCommon()->GetLclNum();
destVarDsc = lvaGetDesc(destVarNum);
if (asgType == TYP_STRUCT)
{
clsHnd = destVarDsc->GetStructHnd();
size = destVarDsc->lvExactSize;
}
}
if (asgType != TYP_STRUCT)
{
size = genTypeSize(asgType);
}
}
if (size == 0)
{
return nullptr;
}
if ((destVarDsc != nullptr) && varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
if (src->IsCall() || src->OperIsSIMD())
{
// Can't take ADDR from these nodes, let fgMorphCopyBlock handle it, #11413.
return nullptr;
}
if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet()))
{
//
// See if we can do a simple transformation:
//
// GT_ASG <TYP_size>
// / \.
// GT_IND GT_IND or CNS_INT
// | |
// [dest] [src]
//
if (asgType == TYP_STRUCT)
{
// It is possible to use `initobj` to init a primitive type on the stack,
// like `ldloca.s 1; initobj 1B000003` where `V01` has type `ref`;
// in this case we generate `ASG struct(BLK<8> struct(ADDR byref(LCL_VAR ref)), 0)`
// and this code path transforms it into `ASG ref(LCL_VARref, 0)` because it is not a real
// struct assignment.
if (size == REGSIZE_BYTES)
{
if (clsHnd == NO_CLASS_HANDLE)
{
// A register-sized cpblk can be treated as an integer assignment.
asgType = TYP_I_IMPL;
}
else
{
BYTE gcPtr;
info.compCompHnd->getClassGClayout(clsHnd, &gcPtr);
asgType = getJitGCType(gcPtr);
}
}
else
{
switch (size)
{
case 1:
asgType = TYP_BYTE;
break;
case 2:
asgType = TYP_SHORT;
break;
#ifdef TARGET_64BIT
case 4:
asgType = TYP_INT;
break;
#endif // TARGET_64BIT
}
}
}
}
GenTree* srcLclVarTree = nullptr;
LclVarDsc* srcVarDsc = nullptr;
if (isCopyBlock)
{
if (src->OperGet() == GT_LCL_VAR)
{
srcLclVarTree = src;
srcVarDsc = lvaGetDesc(src->AsLclVarCommon());
}
else if (src->OperIsIndir() && impIsAddressInLocal(src->AsOp()->gtOp1, &srcLclVarTree))
{
srcVarDsc = lvaGetDesc(srcLclVarTree->AsLclVarCommon());
}
if ((srcVarDsc != nullptr) && varTypeIsStruct(srcLclVarTree) && srcVarDsc->lvPromoted)
{
// Let fgMorphCopyBlock handle it.
return nullptr;
}
}
if (asgType != TYP_STRUCT)
{
noway_assert((size <= REGSIZE_BYTES) || varTypeIsSIMD(asgType));
// For initBlk, a non-constant source is not going to allow us to fiddle
// with the bits to create a single assignment.
// Nor do we (for now) support transforming an InitBlock of SIMD type, unless
// it is a direct assignment to a lclVar and the value is zero.
if (isInitBlock)
{
if (!src->IsConstInitVal())
{
return nullptr;
}
if (varTypeIsSIMD(asgType) && (!src->IsIntegralConst(0) || (destVarDsc == nullptr)))
{
return nullptr;
}
}
if (destVarDsc != nullptr)
{
// Kill everything about dest
if (optLocalAssertionProp)
{
if (optAssertionCount > 0)
{
fgKillDependentAssertions(destVarNum DEBUGARG(tree));
}
}
// A previous incarnation of this code also required the local not to be
// address-exposed (i.e., address-taken). That seems orthogonal to the decision of whether
// to do field-wise assignments: being address-exposed will cause it to be
// "dependently" promoted, so it will be in the right memory location. One possible
// further reason for avoiding field-wise stores is that the struct might have alignment-induced
// holes, whose contents could be meaningful in unsafe code. If we decide that's a valid
// concern, then we could compromise, and say that being address-exposed combined with fields that do not
// completely cover the memory of the struct prevents field-wise assignments. The same situation exists for
// the "src" decision.
if (varTypeIsStruct(destLclVarTree) && destVarDsc->lvPromoted)
{
// Let fgMorphInitBlock handle it. (Since we'll need to do field-var-wise assignments.)
return nullptr;
}
else if (!varTypeIsFloating(destLclVarTree->TypeGet()) && (size == genTypeSize(destVarDsc)))
{
// Use the dest local var directly, as well as its type.
dest = destLclVarTree;
asgType = destVarDsc->lvType;
// If the block operation had been a write to a local var of a small int type,
// of the exact size of the small int type, and the var is NormalizeOnStore,
// we would have labeled it GTF_VAR_USEASG, because the block operation wouldn't
// have done that normalization. If we're now making it into an assignment,
// the NormalizeOnStore will work, and it can be a full def.
if (destVarDsc->lvNormalizeOnStore())
{
dest->gtFlags &= (~GTF_VAR_USEASG);
}
}
else
{
// Could be a non-promoted struct, or a floating point type local, or
// an int subject to a partial write. Don't enregister.
lvaSetVarDoNotEnregister(destVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
// Mark the local var tree as a definition point of the local.
destLclVarTree->gtFlags |= GTF_VAR_DEF;
if (size < destVarDsc->lvExactSize)
{ // If it's not a full-width assignment....
destLclVarTree->gtFlags |= GTF_VAR_USEASG;
}
if (dest == destLclVarTree)
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
dest = gtNewIndir(asgType, addr);
}
}
}
// Check to ensure we don't have a reducible *(& ... )
if (dest->OperIsIndir() && dest->AsIndir()->Addr()->OperGet() == GT_ADDR)
{
// If dest is an Indir or Block, and it has a child that is a Addr node
//
GenTree* addrNode = dest->AsIndir()->Addr(); // known to be a GT_ADDR
// Can we just remove the Ind(Addr(destOp)) and operate directly on 'destOp'?
//
GenTree* destOp = addrNode->gtGetOp1();
var_types destOpType = destOp->TypeGet();
// We can if we have a primitive integer type and the sizes are exactly the same.
//
if ((varTypeIsIntegralOrI(destOp) && (size == genTypeSize(destOpType))))
{
dest = destOp;
asgType = destOpType;
}
}
if (dest->gtEffectiveVal()->OperIsIndir())
{
// If we have no information about the destination, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
if (!fgIsIndirOfAddrOfLocal(dest))
{
dest->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
tree->gtFlags |= GTF_GLOB_REF;
}
dest->SetIndirExceptionFlags(this);
tree->gtFlags |= (dest->gtFlags & GTF_EXCEPT);
}
if (isCopyBlock)
{
if (srcVarDsc != nullptr)
{
// Handled above.
assert(!varTypeIsStruct(srcLclVarTree) || !srcVarDsc->lvPromoted);
if (!varTypeIsFloating(srcLclVarTree->TypeGet()) &&
size == genTypeSize(genActualType(srcLclVarTree->TypeGet())))
{
// Use the src local var directly.
src = srcLclVarTree;
}
else
{
// The source argument of the copyblk can potentially be accessed only through indir(addr(lclVar))
// or indir(lclVarAddr) so it must be on the stack.
unsigned lclVarNum = srcLclVarTree->AsLclVarCommon()->GetLclNum();
lvaSetVarDoNotEnregister(lclVarNum DEBUGARG(DoNotEnregisterReason::OneAsgRetyping));
GenTree* srcAddr;
if (src == srcLclVarTree)
{
srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
src = gtNewOperNode(GT_IND, asgType, srcAddr);
}
else
{
assert(src->OperIsIndir());
}
}
}
if (src->OperIsIndir())
{
if (!fgIsIndirOfAddrOfLocal(src))
{
// If we have no information about the src, we have to assume it could
// live anywhere (not just in the GC heap).
// Mark the GT_IND node so that we use the correct write barrier helper in case
// the field is a GC ref.
src->gtFlags |= (GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
}
src->SetIndirExceptionFlags(this);
}
}
else // InitBlk
{
#ifdef FEATURE_SIMD
if (varTypeIsSIMD(asgType))
{
assert(!isCopyBlock); // Else we would have returned the tree above.
noway_assert(src->IsIntegralConst(0));
noway_assert(destVarDsc != nullptr);
src = gtNewSIMDNode(asgType, src, SIMDIntrinsicInit, destVarDsc->GetSimdBaseJitType(), size);
}
else
#endif
{
if (src->OperIsInitVal())
{
src = src->gtGetOp1();
}
assert(src->IsCnsIntOrI());
// This will mutate the integer constant, in place, to be the correct
// value for the type we are using in the assignment.
src->AsIntCon()->FixupInitBlkValue(asgType);
}
}
// Ensure that the dest is setup appropriately.
if (dest->gtEffectiveVal()->OperIsIndir())
{
dest = fgMorphBlockOperand(dest, asgType, size, false /*isBlkReqd*/);
}
// Ensure that the rhs is setup appropriately.
if (isCopyBlock)
{
src = fgMorphBlockOperand(src, asgType, size, false /*isBlkReqd*/);
}
// Set the lhs and rhs on the assignment.
if (dest != tree->AsOp()->gtOp1)
{
asg->AsOp()->gtOp1 = dest;
}
if (src != asg->AsOp()->gtOp2)
{
asg->AsOp()->gtOp2 = src;
}
asg->ChangeType(asgType);
dest->gtFlags |= GTF_DONT_CSE;
asg->gtFlags &= ~GTF_EXCEPT;
asg->gtFlags |= ((dest->gtFlags | src->gtFlags) & GTF_ALL_EFFECT);
// Un-set GTF_REVERSE_OPS, and it will be set later if appropriate.
asg->gtFlags &= ~GTF_REVERSE_OPS;
#ifdef DEBUG
if (verbose)
{
printf("fgMorphOneAsgBlock (after):\n");
gtDispTree(tree);
}
#endif
return tree;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgMorphPromoteLocalInitBlock: Attempts to promote a local block init tree
// to a tree of promoted field initialization assignments.
//
// Arguments:
// destLclNode - The destination LclVar node
// initVal - The initialization value
// blockSize - The amount of bytes to initialize
//
// Return Value:
// A tree that performs field by field initialization of the destination
// struct variable if various conditions are met, nullptr otherwise.
//
// Notes:
// This transforms a single block initialization assignment like:
//
// * ASG struct (init)
// +--* BLK(12) struct
// | \--* ADDR long
// | \--* LCL_VAR struct(P) V02 loc0
// | \--* int V02.a (offs=0x00) -> V06 tmp3
// | \--* ubyte V02.c (offs=0x04) -> V07 tmp4
// | \--* float V02.d (offs=0x08) -> V08 tmp5
// \--* INIT_VAL int
// \--* CNS_INT int 42
//
// into a COMMA tree of assignments that initialize each promoted struct
// field:
//
// * COMMA void
// +--* COMMA void
// | +--* ASG int
// | | +--* LCL_VAR int V06 tmp3
// | | \--* CNS_INT int 0x2A2A2A2A
// | \--* ASG ubyte
// | +--* LCL_VAR ubyte V07 tmp4
// | \--* CNS_INT int 42
// \--* ASG float
// +--* LCL_VAR float V08 tmp5
// \--* CNS_DBL float 1.5113661732714390e-13
//
GenTree* Compiler::fgMorphPromoteLocalInitBlock(GenTreeLclVar* destLclNode, GenTree* initVal, unsigned blockSize)
{
assert(destLclNode->OperIs(GT_LCL_VAR));
LclVarDsc* destLclVar = lvaGetDesc(destLclNode);
assert(varTypeIsStruct(destLclVar->TypeGet()));
assert(destLclVar->lvPromoted);
if (blockSize == 0)
{
JITDUMP(" size is zero or unknown.\n");
return nullptr;
}
if (destLclVar->IsAddressExposed() && destLclVar->lvContainsHoles)
{
JITDUMP(" dest is address exposed and contains holes.\n");
return nullptr;
}
if (destLclVar->lvCustomLayout && destLclVar->lvContainsHoles)
{
// TODO-1stClassStructs: there are no reasons for this pessimization, delete it.
JITDUMP(" dest has custom layout and contains holes.\n");
return nullptr;
}
if (destLclVar->lvExactSize != blockSize)
{
JITDUMP(" dest size mismatch.\n");
return nullptr;
}
if (!initVal->OperIs(GT_CNS_INT))
{
JITDUMP(" source is not constant.\n");
return nullptr;
}
const int64_t initPattern = (initVal->AsIntCon()->IconValue() & 0xFF) * 0x0101010101010101LL;
if (initPattern != 0)
{
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
LclVarDsc* fieldDesc = lvaGetDesc(destLclVar->lvFieldLclStart + i);
if (varTypeIsSIMD(fieldDesc->TypeGet()) || varTypeIsGC(fieldDesc->TypeGet()))
{
// Cannot initialize GC or SIMD types with a non-zero constant.
// The former is completely bogus. The latter restriction could be
// lifted by supporting non-zero SIMD constants or by generating
// field initialization code that converts an integer constant to
// the appropriate SIMD value. Unlikely to be very useful, though.
JITDUMP(" dest contains GC and/or SIMD fields and source constant is not 0.\n");
return nullptr;
}
}
}
JITDUMP(" using field by field initialization.\n");
GenTree* tree = nullptr;
for (unsigned i = 0; i < destLclVar->lvFieldCnt; ++i)
{
unsigned fieldLclNum = destLclVar->lvFieldLclStart + i;
LclVarDsc* fieldDesc = lvaGetDesc(fieldLclNum);
GenTree* dest = gtNewLclvNode(fieldLclNum, fieldDesc->TypeGet());
// If it had been labeled a "USEASG", assignments to the individual promoted fields are not.
dest->gtFlags |= (destLclNode->gtFlags & ~(GTF_NODE_MASK | GTF_VAR_USEASG));
GenTree* src;
switch (dest->TypeGet())
{
case TYP_BOOL:
case TYP_BYTE:
case TYP_UBYTE:
case TYP_SHORT:
case TYP_USHORT:
// Promoted fields are expected to be "normalize on load". If that changes then
// we may need to adjust this code to widen the constant correctly.
assert(fieldDesc->lvNormalizeOnLoad());
FALLTHROUGH;
case TYP_INT:
{
int64_t mask = (int64_t(1) << (genTypeSize(dest->TypeGet()) * 8)) - 1;
src = gtNewIconNode(static_cast<int32_t>(initPattern & mask));
break;
}
case TYP_LONG:
src = gtNewLconNode(initPattern);
break;
case TYP_FLOAT:
float floatPattern;
memcpy(&floatPattern, &initPattern, sizeof(floatPattern));
src = gtNewDconNode(floatPattern, dest->TypeGet());
break;
case TYP_DOUBLE:
double doublePattern;
memcpy(&doublePattern, &initPattern, sizeof(doublePattern));
src = gtNewDconNode(doublePattern, dest->TypeGet());
break;
case TYP_REF:
case TYP_BYREF:
#ifdef FEATURE_SIMD
case TYP_SIMD8:
case TYP_SIMD12:
case TYP_SIMD16:
case TYP_SIMD32:
#endif // FEATURE_SIMD
assert(initPattern == 0);
src = gtNewIconNode(0, dest->TypeGet());
break;
default:
unreached();
}
GenTree* asg = gtNewAssignNode(dest, src);
if (optLocalAssertionProp)
{
optAssertionGen(asg);
}
if (tree != nullptr)
{
tree = gtNewOperNode(GT_COMMA, TYP_VOID, tree, asg);
}
else
{
tree = asg;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgMorphGetStructAddr: Gets the address of a struct object
//
// Arguments:
// pTree - the parent's pointer to the struct object node
// clsHnd - the class handle for the struct type
// isRValue - true if this is a source (not dest)
//
// Return Value:
// Returns the address of the struct value, possibly modifying the existing tree to
// sink the address below any comma nodes (this is to canonicalize for value numbering).
// If this is a source, it will morph it to a GT_IND before taking its address,
// since it may not be remorphed (and we don't want blk nodes as rvalues).
GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE clsHnd, bool isRValue)
{
GenTree* addr;
GenTree* tree = *pTree;
// If this is an indirection, we can return its address.
if (tree->OperIsIndir())
{
addr = tree->AsOp()->gtOp1;
}
else if (tree->gtOper == GT_COMMA)
{
// If this is a comma, we're going to "sink" the GT_ADDR below it.
(void)fgMorphGetStructAddr(&(tree->AsOp()->gtOp2), clsHnd, isRValue);
tree->gtType = TYP_BYREF;
addr = tree;
}
else
{
switch (tree->gtOper)
{
case GT_LCL_FLD:
case GT_LCL_VAR:
case GT_INDEX:
case GT_FIELD:
case GT_ARR_ELEM:
addr = gtNewOperNode(GT_ADDR, TYP_BYREF, tree);
break;
case GT_INDEX_ADDR:
addr = tree;
break;
default:
{
// TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're
// not going to use "temp"
GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd);
unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum();
lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::VMNeedsStackAddr));
addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue);
break;
}
}
}
*pTree = addr;
return addr;
}
//------------------------------------------------------------------------
// fgMorphBlockOperand: Canonicalize an operand of a block assignment
//
// Arguments:
// tree - The block operand
// asgType - The type of the assignment
// blockWidth - The size of the block
// isBlkReqd - true iff this operand must remain a block node
//
// Return Value:
// Returns the morphed block operand
//
// Notes:
// This does the following:
// - Ensures that a struct operand is a block node or lclVar.
// - Ensures that any COMMAs are above ADDR nodes.
// Although 'tree' WAS an operand of a block assignment, the assignment
// may have been retyped to be a scalar assignment.
GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigned blockWidth, bool isBlkReqd)
{
GenTree* effectiveVal = tree->gtEffectiveVal();
if (asgType != TYP_STRUCT)
{
if (effectiveVal->OperIsIndir())
{
if (!isBlkReqd)
{
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == asgType))
{
effectiveVal = addr->gtGetOp1();
}
else if (effectiveVal->OperIsBlk())
{
effectiveVal->SetOper(GT_IND);
}
}
effectiveVal->gtType = asgType;
}
else if (effectiveVal->TypeGet() != asgType)
{
if (effectiveVal->IsCall())
{
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
else
{
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
effectiveVal = gtNewIndir(asgType, addr);
}
}
}
else
{
GenTreeIndir* indirTree = nullptr;
GenTreeLclVarCommon* lclNode = nullptr;
bool needsIndirection = true;
if (effectiveVal->OperIsIndir())
{
indirTree = effectiveVal->AsIndir();
GenTree* addr = effectiveVal->AsIndir()->Addr();
if ((addr->OperGet() == GT_ADDR) && (addr->gtGetOp1()->OperGet() == GT_LCL_VAR))
{
lclNode = addr->gtGetOp1()->AsLclVarCommon();
}
}
else if (effectiveVal->OperGet() == GT_LCL_VAR)
{
lclNode = effectiveVal->AsLclVarCommon();
}
else if (effectiveVal->IsCall())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeCall* call = effectiveVal->AsCall();
assert(call->TypeGet() == TYP_STRUCT);
assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
#endif
}
#ifdef TARGET_ARM64
else if (effectiveVal->OperIsHWIntrinsic())
{
needsIndirection = false;
#ifdef DEBUG
GenTreeHWIntrinsic* intrinsic = effectiveVal->AsHWIntrinsic();
assert(intrinsic->TypeGet() == TYP_STRUCT);
assert(HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId()));
#endif
}
#endif // TARGET_ARM64
if (lclNode != nullptr)
{
const LclVarDsc* varDsc = lvaGetDesc(lclNode);
if (varTypeIsStruct(varDsc) && (varDsc->lvExactSize == blockWidth) && (varDsc->lvType == asgType))
{
if (effectiveVal != lclNode)
{
JITDUMP("Replacing block node [%06d] with lclVar V%02u\n", dspTreeID(tree), lclNode->GetLclNum());
effectiveVal = lclNode;
}
needsIndirection = false;
}
else
{
// This may be a lclVar that was determined to be address-exposed.
effectiveVal->gtFlags |= (lclNode->gtFlags & GTF_ALL_EFFECT);
}
}
if (needsIndirection)
{
if (indirTree != nullptr)
{
// If we have an indirection and a block is required, it should already be a block.
assert(indirTree->OperIsBlk() || !isBlkReqd);
effectiveVal->gtType = asgType;
}
else
{
GenTree* newTree;
GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
if (isBlkReqd)
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal);
if (clsHnd == NO_CLASS_HANDLE)
{
newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth));
}
else
{
newTree = gtNewObjNode(clsHnd, addr);
gtSetObjGcInfo(newTree->AsObj());
}
}
else
{
newTree = gtNewIndir(asgType, addr);
}
effectiveVal = newTree;
}
}
}
assert(effectiveVal->TypeIs(asgType) || (varTypeIsSIMD(asgType) && varTypeIsStruct(effectiveVal)));
tree = effectiveVal;
return tree;
}
//------------------------------------------------------------------------
// fgMorphCanUseLclFldForCopy: check if we can access LclVar2 using LclVar1's fields.
//
// Arguments:
// lclNum1 - a promoted lclVar that is used in fieldwise assignment;
// lclNum2 - the local variable on the other side of ASG, can be BAD_VAR_NUM.
//
// Return Value:
// True if the second local is valid and has the same struct handle as the first,
// false otherwise.
//
// Notes:
// This check is needed to avoid accessing LCL_VARs with incorrect
// CORINFO_FIELD_HANDLE that would confuse VN optimizations.
//
bool Compiler::fgMorphCanUseLclFldForCopy(unsigned lclNum1, unsigned lclNum2)
{
assert(lclNum1 != BAD_VAR_NUM);
if (lclNum2 == BAD_VAR_NUM)
{
return false;
}
const LclVarDsc* varDsc1 = lvaGetDesc(lclNum1);
const LclVarDsc* varDsc2 = lvaGetDesc(lclNum2);
assert(varTypeIsStruct(varDsc1));
if (!varTypeIsStruct(varDsc2))
{
return false;
}
CORINFO_CLASS_HANDLE struct1 = varDsc1->GetStructHnd();
CORINFO_CLASS_HANDLE struct2 = varDsc2->GetStructHnd();
assert(struct1 != NO_CLASS_HANDLE);
assert(struct2 != NO_CLASS_HANDLE);
if (struct1 != struct2)
{
return false;
}
return true;
}
// Insert conversions and normalize to make the tree amenable to register-based
// FP architectures.
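// For example (illustrative): for a TYP_DOUBLE arithmetic node with a TYP_FLOAT operand, the float
// operand is wrapped in a cast to double; likewise, for a float/double compare the narrower operand
// is widened to double so that both sides have the same floating-point type.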
GenTree* Compiler::fgMorphForRegisterFP(GenTree* tree)
{
if (tree->OperIsArithmetic())
{
if (varTypeIsFloating(tree))
{
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op1->TypeGet()) && varTypeIsFloating(op2->TypeGet()));
if (op1->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp1 = gtNewCastNode(tree->TypeGet(), op1, false, tree->TypeGet());
}
if (op2->TypeGet() != tree->TypeGet())
{
tree->AsOp()->gtOp2 = gtNewCastNode(tree->TypeGet(), op2, false, tree->TypeGet());
}
}
}
else if (tree->OperIsCompare())
{
GenTree* op1 = tree->AsOp()->gtOp1;
if (varTypeIsFloating(op1))
{
GenTree* op2 = tree->gtGetOp2();
assert(varTypeIsFloating(op2));
if (op1->TypeGet() != op2->TypeGet())
{
// both had better be floating; just one is wider than the other
if (op1->TypeGet() == TYP_FLOAT)
{
assert(op2->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_FLOAT)
{
assert(op1->TypeGet() == TYP_DOUBLE);
tree->AsOp()->gtOp2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
}
}
}
return tree;
}
#ifdef FEATURE_SIMD
//--------------------------------------------------------------------------------------------------------------
// getSIMDStructFromField:
// Check whether the field belongs to a simd struct. If it does, return the GenTree* for
// the struct node, along with the base type, field index and simd size. If it does not, return nullptr.
// Usually, if the tree node comes from a simd lclvar that is not used in any SIMD intrinsic, we
// should return nullptr, since in that case the SIMD struct should be treated as a regular struct.
// However, if you want the simd struct node regardless, you can set ignoreUsedInSIMDIntrinsic
// to true. The IsUsedInSIMDIntrinsic check is then skipped, and the SIMD struct node is returned
// whenever the struct is a SIMD struct.
//
// Arguments:
// tree - GentreePtr. This node will be checked to see this is a field which belongs to a simd
// struct used for simd intrinsic or not.
// simdBaseJitTypeOut - CorInfoType pointer, if the tree node is the tree we want, we set *simdBaseJitTypeOut
// to simd lclvar's base JIT type.
// indexOut - unsigned pointer, if the tree is used for simd intrinsic, we will set *indexOut
// equals to the index number of this field.
// simdSizeOut - unsigned pointer, if the tree is used for simd intrinsic, set the *simdSizeOut
// equals to the simd struct size which this tree belongs to.
// ignoreUsedInSIMDIntrinsic - bool. If this is set to true, then this function will ignore
// the UsedInSIMDIntrinsic check.
//
// return value:
// A GenTree* which points to the simd lclvar tree that the field belongs to. If the tree is not a
// simd intrinsic related field, return nullptr.
//
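// Note (illustrative example): for a field access such as FIELD float Y(ADDR(LCL_VAR<Vector4> V02)),
// where V02 is used in SIMD intrinsics, this returns the V02 local node and sets
// *simdBaseJitTypeOut = CORINFO_TYPE_FLOAT, *indexOut = 1 and *simdSizeOut = 16.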
GenTree* Compiler::getSIMDStructFromField(GenTree* tree,
CorInfoType* simdBaseJitTypeOut,
unsigned* indexOut,
unsigned* simdSizeOut,
bool ignoreUsedInSIMDIntrinsic /*false*/)
{
GenTree* ret = nullptr;
if (tree->OperGet() == GT_FIELD)
{
GenTree* objRef = tree->AsField()->GetFldObj();
if (objRef != nullptr)
{
GenTree* obj = nullptr;
if (objRef->gtOper == GT_ADDR)
{
obj = objRef->AsOp()->gtOp1;
}
else if (ignoreUsedInSIMDIntrinsic)
{
obj = objRef;
}
else
{
return nullptr;
}
if (isSIMDTypeLocal(obj))
{
LclVarDsc* varDsc = lvaGetDesc(obj->AsLclVarCommon());
if (varDsc->lvIsUsedInSIMDIntrinsic() || ignoreUsedInSIMDIntrinsic)
{
*simdSizeOut = varDsc->lvExactSize;
*simdBaseJitTypeOut = getBaseJitTypeOfSIMDLocal(obj);
ret = obj;
}
}
else if (obj->OperGet() == GT_SIMD)
{
ret = obj;
GenTreeSIMD* simdNode = obj->AsSIMD();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#ifdef FEATURE_HW_INTRINSICS
else if (obj->OperIsHWIntrinsic())
{
ret = obj;
GenTreeHWIntrinsic* simdNode = obj->AsHWIntrinsic();
*simdSizeOut = simdNode->GetSimdSize();
*simdBaseJitTypeOut = simdNode->GetSimdBaseJitType();
}
#endif // FEATURE_HW_INTRINSICS
}
}
if (ret != nullptr)
{
var_types fieldType = tree->TypeGet();
if (fieldType == TYP_LONG)
{
// Vector2/3/4 expose public float fields while Vector<T>
// and Vector64/128/256<T> have internal ulong fields. So
// we should only ever encounter accesses for TYP_FLOAT or
// TYP_LONG and in the case of the latter we don't want the
// generic type since we are executing some algorithm on the
// raw underlying bits instead.
*simdBaseJitTypeOut = CORINFO_TYPE_ULONG;
}
else
{
assert(fieldType == TYP_FLOAT);
}
unsigned baseTypeSize = genTypeSize(JITtype2varType(*simdBaseJitTypeOut));
*indexOut = tree->AsField()->gtFldOffset / baseTypeSize;
}
return ret;
}
/*****************************************************************************
* If a read operation tries to access simd struct field, then transform the operation
* to the SimdGetElementNode, and return the new tree. Otherwise, return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic NI_Vector128_GetElement.
* Return:
* A GenTree* which points to the new tree. If the tree is not for a simd intrinsic,
* the old tree is returned unchanged.
*/
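// For example (illustrative): reading field Z of a Vector4 local that is used in SIMD intrinsics,
// i.e. FIELD float Z(ADDR(LCL_VAR<Vector4>)), is morphed into a NI_Vector128_GetElement node that
// reads element 2 of the SIMD local.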
GenTree* Compiler::fgMorphFieldToSimdGetElement(GenTree* tree)
{
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree, &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
GenTree* op2 = gtNewIconNode(index, TYP_INT);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
#if defined(TARGET_XARCH)
switch (simdBaseType)
{
case TYP_BYTE:
case TYP_UBYTE:
case TYP_INT:
case TYP_UINT:
case TYP_LONG:
case TYP_ULONG:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE41))
{
return tree;
}
break;
}
case TYP_DOUBLE:
case TYP_FLOAT:
case TYP_SHORT:
case TYP_USHORT:
{
if (!compOpportunisticallyDependsOn(InstructionSet_SSE2))
{
return tree;
}
break;
}
default:
{
unreached();
}
}
#elif defined(TARGET_ARM64)
if (!compOpportunisticallyDependsOn(InstructionSet_AdvSimd))
{
return tree;
}
#endif // TARGET_XARCH, TARGET_ARM64
tree = gtNewSimdGetElementNode(simdBaseType, simdStructNode, op2, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
}
return tree;
}
/*****************************************************************************
* Transform an assignment of a SIMD struct field to SimdWithElementNode, and
* return a new tree. If it is not such an assignment, then return the old tree.
* Argument:
* tree - GenTree*. If this pointer points to simd struct which is used for simd
* intrinsic, we will morph it as simd intrinsic set.
* Return:
* A GenTree* which points to the new tree. If the tree is not such an assignment,
* the old tree is returned unchanged.
*/
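// For example (illustrative): an assignment to field Y of a Vector4 local that is used in SIMD
// intrinsics, ASG(FIELD float Y(ADDR(LCL_VAR<Vector4> V02)), x), is morphed into
// ASG(LCL_VAR<Vector4> V02, WithElement(V02, 1, x)).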
GenTree* Compiler::fgMorphFieldAssignToSimdSetElement(GenTree* tree)
{
assert(tree->OperGet() == GT_ASG);
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(tree->gtGetOp1(), &simdBaseJitType, &index, &simdSize);
if (simdStructNode != nullptr)
{
var_types simdType = simdStructNode->gtType;
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
assert(simdSize <= 16);
assert(simdSize >= ((index + 1) * genTypeSize(simdBaseType)));
GenTree* op2 = gtNewIconNode(index, TYP_INT);
GenTree* op3 = tree->gtGetOp2();
NamedIntrinsic intrinsicId = NI_Vector128_WithElement;
GenTree* target = gtClone(simdStructNode);
assert(target != nullptr);
GenTree* simdTree = gtNewSimdWithElementNode(simdType, simdStructNode, op2, op3, simdBaseJitType, simdSize,
/* isSimdAsHWIntrinsic */ true);
tree->AsOp()->gtOp1 = target;
tree->AsOp()->gtOp2 = simdTree;
// fgMorphTree has already called fgMorphImplicitByRefArgs() on this assignment, but the source
// and target have not yet been morphed.
// Therefore, in case the source and/or target are now implicit byrefs, we need to call it again.
if (fgMorphImplicitByRefArgs(tree))
{
if (tree->gtGetOp1()->OperIsBlk())
{
assert(tree->gtGetOp1()->TypeGet() == simdType);
tree->gtGetOp1()->SetOper(GT_IND);
tree->gtGetOp1()->gtType = simdType;
}
}
#ifdef DEBUG
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
return tree;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------------
// fgMorphCommutative : Try to simplify "(X op C1) op C2" to "X op C3"
// for commutative operators.
//
// Arguments:
// tree - node to fold
//
// return value:
// A folded GenTree* instance or nullptr if something prevents folding.
//
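// For example (illustrative): "(x + 3) + 5" is folded to "x + 8"; the same folding applies when op1 is
// wrapped in a COMMA, e.g. "COMMA(..., x + 3) + 5" becomes "COMMA(..., x + 8)".
//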
GenTreeOp* Compiler::fgMorphCommutative(GenTreeOp* tree)
{
assert(varTypeIsIntegralOrI(tree->TypeGet()));
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_AND, GT_XOR));
// op1 can be GT_COMMA, in this case we're going to fold
// "(op (COMMA(... (op X C1))) C2)" to "(COMMA(... (op X C3)))"
GenTree* op1 = tree->gtGetOp1()->gtEffectiveVal(true);
genTreeOps oper = tree->OperGet();
if (!op1->OperIs(oper) || !tree->gtGetOp2()->IsCnsIntOrI() || !op1->gtGetOp2()->IsCnsIntOrI() ||
op1->gtGetOp1()->IsCnsIntOrI())
{
return nullptr;
}
if (!fgGlobalMorph && (op1 != tree->gtGetOp1()))
{
// Since 'tree->gtGetOp1()' can have complex structure (e.g. COMMA(..(COMMA(..,op1)))
// don't run the optimization for such trees outside of global morph.
// Otherwise, there is a chance of violating VNs invariants and/or modifying a tree
// that is an active CSE candidate.
return nullptr;
}
if (gtIsActiveCSE_Candidate(tree) || gtIsActiveCSE_Candidate(op1))
{
// The optimization removes 'tree' from IR and changes the value of 'op1'.
return nullptr;
}
if (tree->OperMayOverflow() && (tree->gtOverflow() || op1->gtOverflow()))
{
return nullptr;
}
GenTreeIntCon* cns1 = op1->gtGetOp2()->AsIntCon();
GenTreeIntCon* cns2 = tree->gtGetOp2()->AsIntCon();
if (!varTypeIsIntegralOrI(tree->TypeGet()) || cns1->TypeIs(TYP_REF) || !cns1->TypeIs(cns2->TypeGet()))
{
return nullptr;
}
if (gtIsActiveCSE_Candidate(cns1) || gtIsActiveCSE_Candidate(cns2))
{
// The optimization removes 'cns2' from IR and changes the value of 'cns1'.
return nullptr;
}
GenTree* folded = gtFoldExprConst(gtNewOperNode(oper, cns1->TypeGet(), cns1, cns2));
if (!folded->IsCnsIntOrI())
{
// Give up if we can't fold "C1 op C2"
return nullptr;
}
auto foldedCns = folded->AsIntCon();
cns1->SetIconValue(foldedCns->IconValue());
cns1->SetVNsFromNode(foldedCns);
cns1->gtFieldSeq = foldedCns->gtFieldSeq;
op1 = tree->gtGetOp1();
op1->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(cns2);
DEBUG_DESTROY_NODE(foldedCns);
INDEBUG(cns1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1->AsOp();
}
//------------------------------------------------------------------------------
// fgMorphCastedBitwiseOp : Try to simplify "(T)x op (T)y" to "(T)(x op y)".
//
// Arguments:
// tree - node to fold
//
// Return Value:
// A folded GenTree* instance, or nullptr if it couldn't be folded
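// For example (illustrative): "CAST(long <- int)(x) | CAST(long <- int)(y)" is folded to
// "CAST(long <- int)(x | y)", reusing the first cast node and discarding the second.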
GenTree* Compiler::fgMorphCastedBitwiseOp(GenTreeOp* tree)
{
// This transform does not preserve VNs and deletes a node.
assert(fgGlobalMorph);
assert(varTypeIsIntegralOrI(tree));
assert(tree->OperIs(GT_OR, GT_AND, GT_XOR));
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
genTreeOps oper = tree->OperGet();
// see whether both ops are casts, with matching to and from types.
if (op1->OperIs(GT_CAST) && op2->OperIs(GT_CAST))
{
// bail if either operand is a checked cast
if (op1->gtOverflow() || op2->gtOverflow())
{
return nullptr;
}
var_types fromType = op1->AsCast()->CastOp()->TypeGet();
var_types toType = op1->AsCast()->CastToType();
bool isUnsigned = op1->IsUnsigned();
if (varTypeIsFloating(fromType) || (op2->CastFromType() != fromType) || (op2->CastToType() != toType) ||
(op2->IsUnsigned() != isUnsigned))
{
return nullptr;
}
/*
// Reuse gentree nodes:
//
// tree op1
// / \ |
// op1 op2 ==> tree
// | | / \.
// x y x y
//
// (op2 becomes garbage)
*/
tree->gtOp1 = op1->AsCast()->CastOp();
tree->gtOp2 = op2->AsCast()->CastOp();
tree->gtType = genActualType(fromType);
op1->gtType = genActualType(toType);
op1->AsCast()->gtOp1 = tree;
op1->AsCast()->CastToType() = toType;
op1->SetAllEffectsFlags(tree);
// no need to update isUnsigned
DEBUG_DESTROY_NODE(op2);
INDEBUG(op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return op1;
}
return nullptr;
}
/*****************************************************************************
*
* Transform the given GTK_SMPOP tree for code generation.
*/
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
#endif
GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
{
ALLOCA_CHECK();
assert(tree->OperKind() & GTK_SMPOP);
/* The steps in this function are :
o Perform required preorder processing
o Process the first, then second operand, if any
o Perform required postorder morphing
o Perform optional postorder morphing if optimizing
*/
bool isQmarkColon = false;
AssertionIndex origAssertionCount = DUMMY_INIT(0);
AssertionDsc* origAssertionTab = DUMMY_INIT(NULL);
AssertionIndex thenAssertionCount = DUMMY_INIT(0);
AssertionDsc* thenAssertionTab = DUMMY_INIT(NULL);
if (fgGlobalMorph)
{
tree = fgMorphForRegisterFP(tree);
}
genTreeOps oper = tree->OperGet();
var_types typ = tree->TypeGet();
GenTree* op1 = tree->AsOp()->gtOp1;
GenTree* op2 = tree->gtGetOp2IfPresent();
/*-------------------------------------------------------------------------
* First do any PRE-ORDER processing
*/
switch (oper)
{
// Some arithmetic operators need to use a helper call to the EE
int helper;
case GT_ASG:
tree = fgDoNormalizeOnStore(tree);
/* fgDoNormalizeOnStore can change op2 */
noway_assert(op1 == tree->AsOp()->gtOp1);
op2 = tree->AsOp()->gtOp2;
#ifdef FEATURE_SIMD
if (IsBaselineSimdIsaSupported())
{
// We should check whether op2 should be assigned to a SIMD field or not.
// If it is, we should translate the tree to a simd intrinsic.
assert(!fgGlobalMorph || ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0));
GenTree* newTree = fgMorphFieldAssignToSimdSetElement(tree);
typ = tree->TypeGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
#ifdef DEBUG
assert((tree == newTree) && (tree->OperGet() == oper));
if ((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) != 0)
{
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
}
#endif // DEBUG
}
#endif
// We can't CSE the LHS of an assignment. Only r-values can be CSEed.
// Previously, the "lhs" (addr) of a block op was CSE'd. So, to duplicate the former
// behavior, allow CSE'ing if it is a struct type (or a TYP_REF transformed from a struct type)
// TODO-1stClassStructs: improve this.
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_ADDR:
/* op1 of a GT_ADDR is an l-value. Only r-values can be CSEed */
op1->gtFlags |= GTF_DONT_CSE;
break;
case GT_QMARK:
case GT_JTRUE:
noway_assert(op1);
if (op1->OperIsCompare())
{
/* Mark the comparison node with GTF_RELOP_JMP_USED so it knows that it does
not need to materialize the result as a 0 or 1. */
/* We also mark it as DONT_CSE, as we don't handle QMARKs with nonRELOP op1s */
op1->gtFlags |= (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
else
{
GenTree* effOp1 = op1->gtEffectiveVal();
noway_assert((effOp1->gtOper == GT_CNS_INT) &&
(effOp1->IsIntegralConst(0) || effOp1->IsIntegralConst(1)));
}
break;
case GT_COLON:
if (optLocalAssertionProp)
{
isQmarkColon = true;
}
break;
case GT_FIELD:
return fgMorphField(tree, mac);
case GT_INDEX:
return fgMorphArrayIndex(tree);
case GT_CAST:
{
GenTree* morphedCast = fgMorphExpandCast(tree->AsCast());
if (morphedCast != nullptr)
{
return morphedCast;
}
op1 = tree->AsCast()->CastOp();
}
break;
case GT_MUL:
noway_assert(op2 != nullptr);
if (opts.OptimizationEnabled() && !optValnumCSE_phase && !tree->gtOverflow())
{
// MUL(NEG(a), C) => MUL(a, NEG(C))
if (op1->OperIs(GT_NEG) && !op1->gtGetOp1()->IsCnsIntOrI() && op2->IsCnsIntOrI() &&
!op2->IsIconHandle())
{
GenTree* newOp1 = op1->gtGetOp1();
GenTree* newConst = gtNewIconNode(-op2->AsIntCon()->IconValue(), op2->TypeGet());
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
tree->AsOp()->gtOp1 = newOp1;
tree->AsOp()->gtOp2 = newConst;
return fgMorphSmpOp(tree, mac);
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
// For (long)int1 * (long)int2, we don't actually do the
// casts, and just multiply the 32 bit values, which will
// give us the 64 bit result in edx:eax.
if (tree->Is64RsltMul())
{
// We are seeing this node again.
// Morph only the children of casts,
// so as to avoid losing them.
tree = fgMorphLongMul(tree->AsOp());
goto DONE_MORPHING_CHILDREN;
}
tree = fgRecognizeAndMorphLongMul(tree->AsOp());
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->AsOp()->gtGetOp2();
if (tree->Is64RsltMul())
{
goto DONE_MORPHING_CHILDREN;
}
else
{
if (tree->gtOverflow())
helper = tree->IsUnsigned() ? CORINFO_HELP_ULMUL_OVF : CORINFO_HELP_LMUL_OVF;
else
helper = CORINFO_HELP_LMUL;
goto USE_HELPER_FOR_ARITH;
}
}
#endif // !TARGET_64BIT
break;
case GT_ARR_LENGTH:
if (op1->OperIs(GT_CNS_STR))
{
// Optimize `ldstr + String::get_Length()` to CNS_INT
// e.g. "Hello".Length => 5
GenTreeIntCon* iconNode = gtNewStringLiteralLength(op1->AsStrCon());
if (iconNode != nullptr)
{
INDEBUG(iconNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return iconNode;
}
}
break;
case GT_DIV:
// Replace "val / dcon" with "val * (1.0 / dcon)" if dcon is a power of two.
// Powers of two within range are always exactly represented,
// so multiplication by the reciprocal is safe in this scenario
if (fgGlobalMorph && op2->IsCnsFltOrDbl())
{
double divisor = op2->AsDblCon()->gtDconVal;
if (((typ == TYP_DOUBLE) && FloatingPointUtils::hasPreciseReciprocal(divisor)) ||
((typ == TYP_FLOAT) && FloatingPointUtils::hasPreciseReciprocal(forceCastToFloat(divisor))))
{
oper = GT_MUL;
tree->ChangeOper(oper);
op2->AsDblCon()->gtDconVal = 1.0 / divisor;
}
}
// Convert DIV to UDIV if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_DIV));
tree->ChangeOper(GT_UDIV, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_LDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_DIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // !TARGET_64BIT
break;
case GT_UDIV:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = CORINFO_HELP_ULDIV;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
helper = CORINFO_HELP_UDIV;
goto USE_HELPER_FOR_ARITH;
}
#endif
#endif // TARGET_64BIT
break;
case GT_MOD:
if (varTypeIsFloating(typ))
{
helper = CORINFO_HELP_DBLREM;
noway_assert(op2);
if (op1->TypeGet() == TYP_FLOAT)
{
if (op2->TypeGet() == TYP_FLOAT)
{
helper = CORINFO_HELP_FLTREM;
}
else
{
tree->AsOp()->gtOp1 = op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
else if (op2->TypeGet() == TYP_FLOAT)
{
tree->AsOp()->gtOp2 = op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
goto USE_HELPER_FOR_ARITH;
}
// Convert MOD to UMOD if both op1 and op2 are known to be never negative
if (!gtIsActiveCSE_Candidate(tree) && varTypeIsIntegral(tree) && op1->IsNeverNegative(this) &&
op2->IsNeverNegative(this))
{
assert(tree->OperIs(GT_MOD));
tree->ChangeOper(GT_UMOD, GenTree::PRESERVE_VN);
return fgMorphSmpOp(tree, mac);
}
// Do not use optimizations (unlike UMOD's idiv optimizing during codegen) for signed mod.
// A similar optimization for signed mod will not work for a negative perfectly divisible
// HI-word. To make it correct, we would need to divide without the sign and then flip the
// result sign after mod. This requires 18 opcodes + control flow, making it not worth inlining.
goto ASSIGN_HELPER_FOR_MOD;
case GT_UMOD:
#ifdef TARGET_ARMARCH
//
// Note for TARGET_ARMARCH we don't have a remainder instruction, so we don't do this optimization
//
#else // TARGET_XARCH
// If this is an unsigned long mod with a constant divisor,
// then don't morph to a helper call - it can be done faster inline using idiv.
noway_assert(op2);
if ((typ == TYP_LONG) && opts.OptEnabled(CLFLG_CONSTANTFOLD))
{
if (op2->OperIs(GT_CNS_NATIVELONG) && op2->AsIntConCommon()->LngValue() >= 2 &&
op2->AsIntConCommon()->LngValue() <= 0x3fffffff)
{
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1);
noway_assert(op1->TypeIs(TYP_LONG));
// Update flags for op1 morph.
tree->gtFlags &= ~GTF_ALL_EFFECT;
// Only update with op1 as op2 is a constant.
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// If op1 is a constant, then do constant folding of the division operator.
if (op1->OperIs(GT_CNS_NATIVELONG))
{
tree = gtFoldExpr(tree);
}
if (!tree->OperIsConst())
{
tree->AsOp()->CheckDivideByConstOptimized(this);
}
return tree;
}
}
#endif // TARGET_XARCH
ASSIGN_HELPER_FOR_MOD:
// For "val % 1", return 0 if op1 doesn't have any side effects
// and we are not in the CSE phase; during the CSE phase we cannot discard 'tree'
// because it may contain CSE expressions that we haven't yet examined.
//
if (((op1->gtFlags & GTF_SIDE_EFFECT) == 0) && !optValnumCSE_phase)
{
if (op2->IsIntegralConst(1))
{
GenTree* zeroNode = gtNewZeroConNode(typ);
#ifdef DEBUG
zeroNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
DEBUG_DESTROY_NODE(tree);
return zeroNode;
}
}
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
helper = (oper == GT_UMOD) ? CORINFO_HELP_ULMOD : CORINFO_HELP_LMOD;
goto USE_HELPER_FOR_ARITH;
}
#if USE_HELPERS_FOR_INT_DIV
if (typ == TYP_INT)
{
if (oper == GT_UMOD)
{
helper = CORINFO_HELP_UMOD;
goto USE_HELPER_FOR_ARITH;
}
else if (oper == GT_MOD)
{
helper = CORINFO_HELP_MOD;
goto USE_HELPER_FOR_ARITH;
}
}
#endif
#endif // !TARGET_64BIT
if (!optValnumCSE_phase)
{
#ifdef TARGET_ARM64
if (tree->OperIs(GT_UMOD) && op2->IsIntegralConstUnsignedPow2())
{
// Transformation: a % b = a & (b - 1);
tree = fgMorphUModToAndSub(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
// ARM64 architecture manual suggests this transformation
// for the mod operator.
// However, we do skip this optimization for ARM64 if the second operand
// is an integral constant power of 2 because there is an even better
// optimization in lowering that is specific for ARM64.
else if (!(tree->OperIs(GT_MOD) && op2->IsIntegralConstPow2()))
#else
// XARCH only applies this transformation if we know
// that magic division will be used - which is determined
// when 'b' is not a power of 2 constant and mod operator is signed.
// Lowering for XARCH does this optimization already,
// but it is also done here to take advantage of CSE.
if (tree->OperIs(GT_MOD) && op2->IsIntegralConst() && !op2->IsIntegralConstAbsPow2())
#endif
{
// Transformation: a % b = a - (a / b) * b;
tree = fgMorphModToSubMulDiv(tree->AsOp());
op1 = tree->AsOp()->gtOp1;
op2 = tree->AsOp()->gtOp2;
}
}
break;
USE_HELPER_FOR_ARITH:
{
// TODO: this comment is wrong now, do an appropriate fix.
/* We have to morph these arithmetic operations into helper calls
before morphing the arguments (preorder), else the arguments
won't get correct values of fgPtrArgCntCur.
However, try to fold the tree first in case we end up with a
simple node which won't need a helper call at all */
noway_assert(tree->OperIsBinary());
GenTree* oldTree = tree;
tree = gtFoldExpr(tree);
// Were we able to fold it ?
// Note that gtFoldExpr may return a non-leaf even if successful
// e.g. for something like "expr / 1" - see also bug #290853
if (tree->OperIsLeaf() || (oldTree != tree))
{
return (oldTree != tree) ? fgMorphTree(tree) : fgMorphLeaf(tree);
}
// Did we fold it into a comma node with throw?
if (tree->gtOper == GT_COMMA)
{
noway_assert(fgIsCommaThrow(tree));
return fgMorphTree(tree);
}
}
return fgMorphIntoHelperCall(tree, helper, gtNewCallArgs(op1, op2));
case GT_RETURN:
if (!tree->TypeIs(TYP_VOID))
{
if (op1->OperIs(GT_OBJ, GT_BLK, GT_IND))
{
op1 = fgMorphRetInd(tree->AsUnOp());
}
if (op1->OperIs(GT_LCL_VAR))
{
// With a `genReturnBB` this `RETURN(src)` tree will be replaced by a `ASG(genReturnLocal, src)`
// and `ASG` will be transformed into a field-by-field copy without parent local referencing if
// possible.
GenTreeLclVar* lclVar = op1->AsLclVar();
unsigned lclNum = lclVar->GetLclNum();
if ((genReturnLocal == BAD_VAR_NUM) || (genReturnLocal == lclNum))
{
LclVarDsc* varDsc = lvaGetDesc(lclVar);
if (varDsc->CanBeReplacedWithItsField(this))
{
// We can replace the struct with its only field and allow copy propagation to replace
// return value that was written as a field.
unsigned fieldLclNum = varDsc->lvFieldLclStart;
LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum);
JITDUMP("Replacing an independently promoted local var V%02u with its only field "
"V%02u for "
"the return [%06u]\n",
lclVar->GetLclNum(), fieldLclNum, dspTreeID(tree));
lclVar->SetLclNum(fieldLclNum);
lclVar->ChangeType(fieldDsc->lvType);
}
}
}
}
// normalize small integer return values
if (fgGlobalMorph && varTypeIsSmall(info.compRetType) && (op1 != nullptr) && !op1->TypeIs(TYP_VOID) &&
fgCastNeeded(op1, info.compRetType))
{
// Small-typed return values are normalized by the callee
op1 = gtNewCastNode(TYP_INT, op1, false, info.compRetType);
// Propagate GTF_COLON_COND
op1->gtFlags |= (tree->gtFlags & GTF_COLON_COND);
tree->AsOp()->gtOp1 = fgMorphTree(op1);
// Propagate side effect flags
tree->SetAllEffectsFlags(tree->AsOp()->gtGetOp1());
return tree;
}
break;
case GT_EQ:
case GT_NE:
{
GenTree* optimizedTree = gtFoldTypeCompare(tree);
if (optimizedTree != tree)
{
return fgMorphTree(optimizedTree);
}
// Pattern-matching optimization:
// (a % c) ==/!= 0
// for power-of-2 constant `c`
// =>
// a & (c - 1) ==/!= 0
// For integer `a`, even if negative.
if (opts.OptimizationEnabled() && !optValnumCSE_phase)
{
assert(tree->OperIs(GT_EQ, GT_NE));
if (op1->OperIs(GT_MOD) && varTypeIsIntegral(op1) && op2->IsIntegralConst(0))
{
GenTree* op1op2 = op1->AsOp()->gtOp2;
if (op1op2->IsCnsIntOrI())
{
const ssize_t modValue = op1op2->AsIntCon()->IconValue();
if (isPow2(modValue))
{
JITDUMP("\nTransforming:\n");
DISPTREE(tree);
op1->SetOper(GT_AND); // Change % => &
op1op2->AsIntConCommon()->SetIconValue(modValue - 1); // Change c => c - 1
fgUpdateConstTreeValueNumber(op1op2);
JITDUMP("\ninto:\n");
DISPTREE(tree);
}
}
}
}
}
FALLTHROUGH;
case GT_GT:
{
// Try and optimize nullable boxes feeding compares
GenTree* optimizedTree = gtFoldBoxNullable(tree);
if (optimizedTree->OperGet() != tree->OperGet())
{
return optimizedTree;
}
else
{
tree = optimizedTree;
}
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
break;
}
case GT_RUNTIMELOOKUP:
return fgMorphTree(op1);
#ifdef TARGET_ARM
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName == NI_System_Math_Round)
{
switch (tree->TypeGet())
{
case TYP_DOUBLE:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_DBLROUND, gtNewCallArgs(op1));
case TYP_FLOAT:
return fgMorphIntoHelperCall(tree, CORINFO_HELP_FLTROUND, gtNewCallArgs(op1));
default:
unreached();
}
}
break;
#endif
case GT_PUTARG_TYPE:
return fgMorphTree(tree->AsUnOp()->gtGetOp1());
case GT_NULLCHECK:
{
op1 = tree->AsUnOp()->gtGetOp1();
if (op1->IsCall())
{
GenTreeCall* const call = op1->AsCall();
if (call->IsHelperCall() && s_helperCallProperties.NonNullReturn(eeGetHelperNum(call->gtCallMethHnd)))
{
JITDUMP("\nNULLCHECK on [%06u] will always succeed\n", dspTreeID(call));
// TODO: Can we also remove the call?
//
return fgMorphTree(call);
}
}
}
break;
default:
break;
}
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
GenTree* morphed = fgMorphReduceAddOps(tree);
if (morphed != tree)
return fgMorphTree(morphed);
}
/*-------------------------------------------------------------------------
* Process the first operand, if any
*/
if (op1)
{
// If we are entering the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can restore this state when entering the "else" part
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
origAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
origAssertionCount = optAssertionCount;
memcpy(origAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
origAssertionCount = 0;
origAssertionTab = nullptr;
}
}
// We might need a new MorphAddressContext context. (These are used to convey
// parent context about how addresses being calculated will be used; see the
// specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
MorphAddrContext subIndMac1(MACK_Ind);
MorphAddrContext* subMac1 = mac;
if (subMac1 == nullptr || subMac1->m_kind == MACK_Ind)
{
switch (tree->gtOper)
{
case GT_ADDR:
// A non-null mac here implies this node is part of an address computation.
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
subMac1->m_kind = MACK_Addr;
}
break;
case GT_COMMA:
// In a comma, the incoming context only applies to the rightmost arg of the
// comma list. The left arg (op1) gets a fresh context.
subMac1 = nullptr;
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
// A non-null mac here implies this node is part of an address computation (the tree parent is
// GT_ADDR).
// If so, we need to pass the existing mac down to the child node.
//
// Otherwise, use a new mac.
if (subMac1 == nullptr)
{
subMac1 = &subIndMac1;
}
break;
default:
break;
}
}
// For additions, if we're in an IND context keep track of whether
// all offsets added to the address are constant, and their sum.
if (tree->gtOper == GT_ADD && subMac1 != nullptr)
{
assert(subMac1->m_kind == MACK_Ind || subMac1->m_kind == MACK_Addr); // Can't be a CopyBlock.
GenTree* otherOp = tree->AsOp()->gtOp2;
// Is the other operator a constant?
if (otherOp->IsCnsIntOrI())
{
ClrSafeInt<size_t> totalOffset(subMac1->m_totalOffset);
totalOffset += otherOp->AsIntConCommon()->IconValue();
if (totalOffset.IsOverflow())
{
// We will consider an offset so large as to overflow as "not a constant" --
// we will do a null check.
subMac1->m_allConstantOffsets = false;
}
else
{
subMac1->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
}
else
{
subMac1->m_allConstantOffsets = false;
}
}
// If op1 is a GT_FIELD or indir, we need to pass down the mac if
// its parent is GT_ADDR, since the address of op1
// is part of an ongoing address computation. Otherwise
// op1 represents the value of the field and so any address
// calculations it does are in a new context.
if (((op1->gtOper == GT_FIELD) || op1->OperIsIndir()) && (tree->gtOper != GT_ADDR))
{
subMac1 = nullptr;
// The impact of op1's value to any ongoing
// address computation is handled below when looking
// at op2.
}
tree->AsOp()->gtOp1 = op1 = fgMorphTree(op1, subMac1);
// If we are exiting the "then" part of a Qmark-Colon we must
// save the state of the current copy assignment table
// so that we can merge this state with the "else" part exit
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
if (optAssertionCount)
{
noway_assert(optAssertionCount <= optMaxAssertionCount); // else ALLOCA() is a bad idea
unsigned tabSize = optAssertionCount * sizeof(AssertionDsc);
thenAssertionTab = (AssertionDsc*)ALLOCA(tabSize);
thenAssertionCount = optAssertionCount;
memcpy(thenAssertionTab, optAssertionTabPrivate, tabSize);
}
else
{
thenAssertionCount = 0;
thenAssertionTab = nullptr;
}
}
/* Morphing along with folding and inlining may have changed the
* side effect flags, so we have to reset them
*
* NOTE: Don't reset the exception flags on nodes that may throw */
assert(tree->gtOper != GT_CALL);
if (!tree->OperRequiresCallFlag(this))
{
tree->gtFlags &= ~GTF_CALL;
}
/* Propagate the new flags */
tree->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
// &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does.
// Similarly for clsVar
if (oper == GT_ADDR && (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CLS_VAR))
{
tree->gtFlags &= ~GTF_GLOB_REF;
}
} // if (op1)
/*-------------------------------------------------------------------------
* Process the second operand, if any
*/
if (op2)
{
// If we are entering the "else" part of a Qmark-Colon we must
// reset the state of the current copy assignment table
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
optAssertionReset(0);
if (origAssertionCount)
{
size_t tabSize = origAssertionCount * sizeof(AssertionDsc);
memcpy(optAssertionTabPrivate, origAssertionTab, tabSize);
optAssertionReset(origAssertionCount);
}
}
// We might need a new MorphAddressContext context to use in evaluating op2.
// (These are used to convey parent context about how addresses being calculated
// will be used; see the specification comment for MorphAddrContext for full details.)
// Assume it's an Ind context to start.
switch (tree->gtOper)
{
case GT_ADD:
if (mac != nullptr && mac->m_kind == MACK_Ind)
{
GenTree* otherOp = tree->AsOp()->gtOp1;
// Is the other operator a constant?
if (otherOp->IsCnsIntOrI())
{
mac->m_totalOffset += otherOp->AsIntConCommon()->IconValue();
}
else
{
mac->m_allConstantOffsets = false;
}
}
break;
default:
break;
}
// If op2 is a GT_FIELD or indir, we must be taking its value,
// so it should evaluate its address in a new context.
if ((op2->gtOper == GT_FIELD) || op2->OperIsIndir())
{
// The impact of op2's value to any ongoing
// address computation is handled above when looking
// at op1.
mac = nullptr;
}
tree->AsOp()->gtOp2 = op2 = fgMorphTree(op2, mac);
/* Propagate the side effect flags from op2 */
tree->gtFlags |= (op2->gtFlags & GTF_ALL_EFFECT);
// If we are exiting the "else" part of a Qmark-Colon we must
// merge the state of the current copy assignment table with
// that of the exit of the "then" part.
if (isQmarkColon)
{
noway_assert(optLocalAssertionProp);
// If either exit table has zero entries then
// the merged table also has zero entries
if (optAssertionCount == 0 || thenAssertionCount == 0)
{
optAssertionReset(0);
}
else
{
size_t tabSize = optAssertionCount * sizeof(AssertionDsc);
if ((optAssertionCount != thenAssertionCount) ||
(memcmp(thenAssertionTab, optAssertionTabPrivate, tabSize) != 0))
{
// Yes they are different so we have to find the merged set
// Iterate over the copy asgn table removing any entries
// that do not have an exact match in the thenAssertionTab
AssertionIndex index = 1;
while (index <= optAssertionCount)
{
AssertionDsc* curAssertion = optGetAssertion(index);
for (unsigned j = 0; j < thenAssertionCount; j++)
{
AssertionDsc* thenAssertion = &thenAssertionTab[j];
// Do the left sides match?
if ((curAssertion->op1.lcl.lclNum == thenAssertion->op1.lcl.lclNum) &&
(curAssertion->assertionKind == thenAssertion->assertionKind))
{
// Do the right sides match?
if ((curAssertion->op2.kind == thenAssertion->op2.kind) &&
(curAssertion->op2.lconVal == thenAssertion->op2.lconVal))
{
goto KEEP;
}
else
{
goto REMOVE;
}
}
}
//
// If we fall out of the loop above then we didn't find
// any matching entry in the thenAssertionTab, so it must
// have been killed on that path and we remove it here
//
REMOVE:
// The data at optAssertionTabPrivate[i] is to be removed
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
if (verbose)
{
printf("The QMARK-COLON ");
printTreeID(tree);
printf(" removes assertion candidate #%d\n", index);
}
#endif
optAssertionRemove(index);
continue;
KEEP:
// The data at optAssertionTabPrivate[i] is to be kept
index++;
}
}
}
}
} // if (op2)
#ifndef TARGET_64BIT
DONE_MORPHING_CHILDREN:
#endif // !TARGET_64BIT
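// Now that the children have been morphed, recompute this node's exception,
// assignment and call flags from the operator and its operands.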
if (tree->OperIsIndirOrArrLength())
{
tree->SetIndirExceptionFlags(this);
}
else
{
if (tree->OperMayThrow(this))
{
// Mark the tree node as potentially throwing an exception
tree->gtFlags |= GTF_EXCEPT;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_EXCEPT) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_EXCEPT) == 0)))
{
tree->gtFlags &= ~GTF_EXCEPT;
}
}
}
if (tree->OperRequiresAsgFlag())
{
tree->gtFlags |= GTF_ASG;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_ASG) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_ASG) == 0)))
{
tree->gtFlags &= ~GTF_ASG;
}
}
if (tree->OperRequiresCallFlag(this))
{
tree->gtFlags |= GTF_CALL;
}
else
{
if (((op1 == nullptr) || ((op1->gtFlags & GTF_CALL) == 0)) &&
((op2 == nullptr) || ((op2->gtFlags & GTF_CALL) == 0)))
{
tree->gtFlags &= ~GTF_CALL;
}
}
/*-------------------------------------------------------------------------
* Now do POST-ORDER processing
*/
if (varTypeIsGC(tree->TypeGet()) && (op1 && !varTypeIsGC(op1->TypeGet())) && (op2 && !varTypeIsGC(op2->TypeGet())))
{
// The tree is really not GC but was marked as such. Now that the
// children have been unmarked, unmark the tree too.
// Remember that GT_COMMA inherits its type only from op2
if (tree->gtOper == GT_COMMA)
{
tree->gtType = genActualType(op2->TypeGet());
}
else
{
tree->gtType = genActualType(op1->TypeGet());
}
}
GenTree* oldTree = tree;
GenTree* qmarkOp1 = nullptr;
GenTree* qmarkOp2 = nullptr;
if ((tree->OperGet() == GT_QMARK) && (tree->AsOp()->gtOp2->OperGet() == GT_COLON))
{
qmarkOp1 = oldTree->AsOp()->gtOp2->AsOp()->gtOp1;
qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2;
}
// Try to fold it, maybe we get lucky.
tree = gtFoldExpr(tree);
if (oldTree != tree)
{
/* if gtFoldExpr returned op1 or op2 then we are done */
if ((tree == op1) || (tree == op2) || (tree == qmarkOp1) || (tree == qmarkOp2))
{
return tree;
}
/* If we created a comma-throw tree then we need to morph op1 */
if (fgIsCommaThrow(tree))
{
tree->AsOp()->gtOp1 = fgMorphTree(tree->AsOp()->gtOp1);
fgMorphTreeDone(tree);
return tree;
}
return tree;
}
else if (tree->OperIsConst())
{
return tree;
}
/* gtFoldExpr could have used setOper to change the oper */
oper = tree->OperGet();
typ = tree->TypeGet();
/* gtFoldExpr could have changed op1 and op2 */
op1 = tree->AsOp()->gtOp1;
op2 = tree->gtGetOp2IfPresent();
// Do we have an integer compare operation?
//
if (tree->OperIsCompare() && varTypeIsIntegralOrI(tree->TypeGet()))
{
// Are we comparing against zero?
//
if (op2->IsIntegralConst(0))
{
// Request that the codegen for op1 sets the condition flags
// when it generates the code for op1.
//
// Codegen for op1 must set the condition flags if
// this method returns true.
//
op1->gtRequestSetFlags();
}
}
/*-------------------------------------------------------------------------
* Perform the required oper-specific postorder morphing
*/
GenTree* temp;
size_t ival1;
GenTree* lclVarTree;
GenTree* effectiveOp1;
FieldSeqNode* fieldSeq = nullptr;
switch (oper)
{
case GT_ASG:
if (op1->OperIs(GT_LCL_VAR) && ((op1->gtFlags & GTF_VAR_FOLDED_IND) != 0))
{
op1->gtFlags &= ~GTF_VAR_FOLDED_IND;
tree = fgDoNormalizeOnStore(tree);
op2 = tree->gtGetOp2();
}
lclVarTree = fgIsIndirOfAddrOfLocal(op1);
if (lclVarTree != nullptr)
{
lclVarTree->gtFlags |= GTF_VAR_DEF;
}
effectiveOp1 = op1->gtEffectiveVal();
// If we are storing a small type, we might be able to omit a cast.
if (effectiveOp1->OperIs(GT_IND, GT_CLS_VAR) && varTypeIsSmall(effectiveOp1))
{
if (!gtIsActiveCSE_Candidate(op2) && op2->OperIs(GT_CAST) &&
varTypeIsIntegral(op2->AsCast()->CastOp()) && !op2->gtOverflow())
{
var_types castType = op2->CastToType();
// If we are performing a narrowing cast and
// castType is larger or the same as op1's type
// then we can discard the cast.
if (varTypeIsSmall(castType) && (genTypeSize(castType) >= genTypeSize(effectiveOp1)))
{
tree->AsOp()->gtOp2 = op2 = op2->AsCast()->CastOp();
}
}
}
fgAssignSetVarDef(tree);
/* We can't CSE the LHS of an assignment */
/* We must also set GTF_DONT_CSE in the pre-morphing phase, otherwise assertionProp doesn't see it */
if (op1->IsLocal() || (op1->TypeGet() != TYP_STRUCT))
{
op1->gtFlags |= GTF_DONT_CSE;
}
break;
case GT_CAST:
tree = fgOptimizeCast(tree->AsCast());
if (!tree->OperIsSimple())
{
return tree;
}
if (tree->OperIs(GT_CAST) && tree->gtOverflow())
{
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->AsOp()->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_EQ:
case GT_NE:
// It is not safe to reorder/delete CSE's
if (!optValnumCSE_phase && op2->IsIntegralConst())
{
tree = fgOptimizeEqualityComparisonWithConst(tree->AsOp());
assert(tree->OperIsCompare());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
goto COMPARE;
case GT_LT:
case GT_LE:
case GT_GE:
case GT_GT:
if (!optValnumCSE_phase && (op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST)))
{
tree = fgOptimizeRelationalComparisonWithCasts(tree->AsOp());
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2();
}
// op2's value may be changed, so it cannot be a CSE candidate.
if (op2->IsIntegralConst() && !gtIsActiveCSE_Candidate(op2))
{
tree = fgOptimizeRelationalComparisonWithConst(tree->AsOp());
oper = tree->OperGet();
assert(op1 == tree->AsOp()->gtGetOp1());
assert(op2 == tree->AsOp()->gtGetOp2());
}
COMPARE:
noway_assert(tree->OperIsCompare());
break;
case GT_MUL:
#ifndef TARGET_64BIT
if (typ == TYP_LONG)
{
// This must be GTF_MUL_64RSLT
INDEBUG(tree->AsOp()->DebugCheckLongMul());
return tree;
}
#endif // TARGET_64BIT
goto CM_OVF_OP;
case GT_SUB:
if (tree->gtOverflow())
{
goto CM_OVF_OP;
}
// TODO #4104: there are a lot of other places where
// this condition is not checked before transformations.
if (fgGlobalMorph)
{
/* Check for "op1 - cns2" , we change it to "op1 + (-cns2)" */
noway_assert(op2);
if (op2->IsCnsIntOrI() && !op2->IsIconHandle())
{
// Negate the constant and change the node to be "+",
// except when `op2` is a const byref.
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField();
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* Check for "cns1 - op2" , we change it to "(cns1 + (-op2))" */
noway_assert(op1);
if (op1->IsCnsIntOrI())
{
noway_assert(varTypeIsIntOrI(tree));
// The type of the new GT_NEG node cannot just be op2->TypeGet().
// Otherwise we may sign-extend incorrectly in cases where the GT_NEG
// node ends up feeding directly into a cast, for example in
// GT_CAST<ubyte>(GT_SUB(0, s_1.ubyte))
tree->AsOp()->gtOp2 = op2 = gtNewOperNode(GT_NEG, genActualType(op2->TypeGet()), op2);
fgMorphTreeDone(op2);
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
/* No match - exit */
}
// Skip the optimization if the non-NEG operand is constant.
// Neither op1 nor op2 can be a constant here because that was already checked above.
if (opts.OptimizationEnabled() && fgGlobalMorph)
{
// a - -b => a + b
// SUB(a, NEG(b)) => ADD(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
// tree: SUB
// op1: a
// op2: NEG
// op2Child: b
GenTree* op2Child = op2->AsOp()->gtOp1; // b
oper = GT_ADD;
tree->SetOper(oper, GenTree::PRESERVE_VN);
tree->AsOp()->gtOp2 = op2Child;
DEBUG_DESTROY_NODE(op2);
op2 = op2Child;
}
// -a - -b => b - a
// SUB(NEG(a), NEG(b)) => SUB(b, a)
else if (op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG) && gtCanSwapOrder(op1, op2))
{
// tree: SUB
// op1: NEG
// op1Child: a
// op2: NEG
// op2Child: b
GenTree* op1Child = op1->AsOp()->gtOp1; // a
GenTree* op2Child = op2->AsOp()->gtOp1; // b
tree->AsOp()->gtOp1 = op2Child;
tree->AsOp()->gtOp2 = op1Child;
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(op2);
op1 = op2Child;
op2 = op1Child;
}
}
break;
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_DIV:
#ifdef TARGET_LOONGARCH64
case GT_MOD:
#endif
if (!varTypeIsFloating(tree->gtType))
{
// Codegen for this instruction needs to be able to throw two exceptions:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
}
break;
case GT_UDIV:
#ifdef TARGET_LOONGARCH64
case GT_UMOD:
#endif
// Codegen for this instruction needs to be able to throw one exception:
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_DIV_BY_ZERO);
break;
#endif // defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
case GT_ADD:
CM_OVF_OP:
if (tree->gtOverflow())
{
tree->gtRequestSetFlags();
// Add the exception-throwing basic block to jump to on overflow
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_OVERFLOW);
// We can't do any commutative morphing for overflow instructions
break;
}
CM_ADD_OP:
FALLTHROUGH;
case GT_OR:
case GT_XOR:
case GT_AND:
tree = fgOptimizeCommutativeArithmetic(tree->AsOp());
if (!tree->OperIsSimple())
{
return tree;
}
typ = tree->TypeGet();
oper = tree->OperGet();
op1 = tree->gtGetOp1();
op2 = tree->gtGetOp2IfPresent();
break;
case GT_NOT:
case GT_NEG:
// Remove double negation/not.
// Note: this is not a safe transformation if "tree" is a CSE candidate.
// Consider for example the following expression: NEG(NEG(OP)), where any
// NEG is a CSE candidate. Were we to morph this to just OP, CSE would fail to find
// the original NEG in the statement.
if (op1->OperIs(oper) && opts.OptimizationEnabled() && !gtIsActiveCSE_Candidate(tree) &&
!gtIsActiveCSE_Candidate(op1))
{
JITDUMP("Remove double negation/not\n")
GenTree* op1op1 = op1->gtGetOp1();
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op1op1;
}
// Distribute negation over simple multiplication/division expressions
if (opts.OptimizationEnabled() && !optValnumCSE_phase && tree->OperIs(GT_NEG) &&
op1->OperIs(GT_MUL, GT_DIV))
{
GenTreeOp* mulOrDiv = op1->AsOp();
GenTree* op1op1 = mulOrDiv->gtGetOp1();
GenTree* op1op2 = mulOrDiv->gtGetOp2();
if (!op1op1->IsCnsIntOrI() && op1op2->IsCnsIntOrI() && !op1op2->IsIconHandle())
{
// NEG(MUL(a, C)) => MUL(a, -C)
// NEG(DIV(a, C)) => DIV(a, -C), except when C = {-1, 1}
ssize_t constVal = op1op2->AsIntCon()->IconValue();
if ((mulOrDiv->OperIs(GT_DIV) && (constVal != -1) && (constVal != 1)) ||
(mulOrDiv->OperIs(GT_MUL) && !mulOrDiv->gtOverflow()))
{
GenTree* newOp1 = op1op1; // a
GenTree* newOp2 = gtNewIconNode(-constVal, op1op2->TypeGet()); // -C
mulOrDiv->gtOp1 = newOp1;
mulOrDiv->gtOp2 = newOp2;
mulOrDiv->SetVNsFromNode(tree);
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1op2);
return mulOrDiv;
}
}
}
/* Any constant cases should have been folded earlier */
noway_assert(!op1->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD) || optValnumCSE_phase);
break;
case GT_CKFINITE:
noway_assert(varTypeIsFloating(op1->TypeGet()));
fgAddCodeRef(compCurBB, bbThrowIndex(compCurBB), SCK_ARITH_EXCPN);
break;
case GT_BOUNDS_CHECK:
fgSetRngChkTarget(tree);
break;
case GT_OBJ:
case GT_BLK:
case GT_IND:
{
// If we have IND(ADDR(X)) and X has GTF_GLOB_REF, we must set GTF_GLOB_REF on
// the OBJ. Note that the GTF_GLOB_REF will have been cleared on ADDR(X) where X
// is a local or CLS_VAR, even if it has been address-exposed.
if (op1->OperIs(GT_ADDR))
{
tree->gtFlags |= (op1->AsUnOp()->gtGetOp1()->gtFlags & GTF_GLOB_REF);
}
if (!tree->OperIs(GT_IND))
{
break;
}
// Can not remove a GT_IND if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
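// 'temp' will hold the local node we may fold this indirection into, and
// 'ival1' the constant byte offset (if any) into that local.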
bool foldAndReturnTemp = false;
temp = nullptr;
ival1 = 0;
// Don't remove a volatile GT_IND, even if the address points to a local variable.
if ((tree->gtFlags & GTF_IND_VOLATILE) == 0)
{
/* Try to Fold *(&X) into X */
if (op1->gtOper == GT_ADDR)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
temp = op1->AsOp()->gtOp1; // X
// In the test below, if they're both TYP_STRUCT, this of course does *not* mean that
// they are the *same* struct type. In fact, they almost certainly aren't. If the
// address has an associated field sequence, that identifies this case; go through
// the "lcl_fld" path rather than this one.
FieldSeqNode* addrFieldSeq = nullptr; // This is an unused out parameter below.
if (typ == temp->TypeGet() && !GetZeroOffsetFieldMap()->Lookup(op1, &addrFieldSeq))
{
foldAndReturnTemp = true;
}
else if (temp->OperIsLocal())
{
unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
// We will try to optimize when we have a promoted struct with a zero lvFldOffset
if (varDsc->lvPromoted && (varDsc->lvFldOffset == 0))
{
noway_assert(varTypeIsStruct(varDsc));
// We will try to optimize when we have a single field struct that is being struct promoted
if (varDsc->lvFieldCnt == 1)
{
unsigned lclNumFld = varDsc->lvFieldLclStart;
// just grab the promoted field
LclVarDsc* fieldVarDsc = lvaGetDesc(lclNumFld);
// Also make sure that the tree type matches the fieldVarType and that its lvFldOffset
// is zero
if (fieldVarDsc->TypeGet() == typ && (fieldVarDsc->lvFldOffset == 0))
{
// We can just use the existing promoted field LclNum
temp->AsLclVarCommon()->SetLclNum(lclNumFld);
temp->gtType = fieldVarDsc->TypeGet();
foldAndReturnTemp = true;
}
}
}
// If the type of the IND (typ) is a "small int", and the type of the local has the
// same width, then we can reduce to just the local variable -- it will be
// correctly normalized.
//
// The below transformation cannot be applied if the local var needs to be normalized on load.
else if (varTypeIsSmall(typ) && (genTypeSize(varDsc) == genTypeSize(typ)) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
const bool definitelyLoad = (tree->gtFlags & GTF_DONT_CSE) == 0;
const bool possiblyStore = !definitelyLoad;
if (possiblyStore || (varTypeIsUnsigned(varDsc) == varTypeIsUnsigned(typ)))
{
typ = temp->TypeGet();
tree->gtType = typ;
foldAndReturnTemp = true;
if (possiblyStore)
{
// This node can be on the left-hand-side of an assignment node.
// Mark this node with GTF_VAR_FOLDED_IND to make sure that fgDoNormalizeOnStore()
// is called on its parent in post-order morph.
temp->gtFlags |= GTF_VAR_FOLDED_IND;
}
}
}
// For matching types we can fold
else if (!varTypeIsStruct(typ) && (lvaTable[lclNum].lvType == typ) &&
!lvaTable[lclNum].lvNormalizeOnLoad())
{
tree->gtType = typ = temp->TypeGet();
foldAndReturnTemp = true;
}
else
{
// Assumes that when Lookup returns "false" it will leave "fieldSeq" unmodified (i.e.
// nullptr)
assert(fieldSeq == nullptr);
bool b = GetZeroOffsetFieldMap()->Lookup(op1, &fieldSeq);
assert(b || fieldSeq == nullptr);
if ((fieldSeq != nullptr) && (temp->OperGet() == GT_LCL_FLD))
{
// Append the field sequence, change the type.
temp->AsLclFld()->SetFieldSeq(
GetFieldSeqStore()->Append(temp->AsLclFld()->GetFieldSeq(), fieldSeq));
temp->gtType = typ;
foldAndReturnTemp = true;
}
}
// Otherwise we will fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
else // !temp->OperIsLocal()
{
// We don't try to fold away the GT_IND/GT_ADDR for this case
temp = nullptr;
}
}
else if (op1->OperGet() == GT_ADD)
{
#ifdef TARGET_ARM
// Check for a misaligned floating point indirection.
if (varTypeIsFloating(typ))
{
GenTree* addOp2 = op1->AsOp()->gtGetOp2();
if (addOp2->IsCnsIntOrI())
{
ssize_t offset = addOp2->AsIntCon()->gtIconVal;
if ((offset % emitTypeSize(TYP_FLOAT)) != 0)
{
tree->gtFlags |= GTF_IND_UNALIGNED;
}
}
}
#endif // TARGET_ARM
/* Try to change *(&lcl + cns) into lcl[cns] to prevent materialization of &lcl */
if (op1->AsOp()->gtOp1->OperGet() == GT_ADDR && op1->AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
opts.OptimizationEnabled())
{
// No overflow arithmetic with pointers
noway_assert(!op1->gtOverflow());
temp = op1->AsOp()->gtOp1->AsOp()->gtOp1;
if (!temp->OperIsLocal())
{
temp = nullptr;
break;
}
// Can not remove the GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1->AsOp()->gtOp1))
{
break;
}
ival1 = op1->AsOp()->gtOp2->AsIntCon()->gtIconVal;
fieldSeq = op1->AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
// Does the address have an associated zero-offset field sequence?
FieldSeqNode* addrFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(op1->AsOp()->gtOp1, &addrFieldSeq))
{
fieldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fieldSeq);
}
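// If the offset is zero and the types match exactly, we can fold *(&lcl + 0)
// directly into the local itself.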
if (ival1 == 0 && typ == temp->TypeGet() && temp->TypeGet() != TYP_STRUCT)
{
noway_assert(!varTypeIsGC(temp->TypeGet()));
foldAndReturnTemp = true;
}
else
{
// The emitter can't handle large offsets
if (ival1 != (unsigned short)ival1)
{
break;
}
// The emitter can get confused by invalid offsets
if (ival1 >= Compiler::lvaLclSize(temp->AsLclVarCommon()->GetLclNum()))
{
break;
}
}
// Now we can fold this into a GT_LCL_FLD below
// where we check (temp != nullptr)
}
}
}
// At this point we may have a lclVar or lclFld that might be foldable with a bit of extra massaging:
// - We may have a load of a local where the load has a different type than the local
// - We may have a load of a local plus an offset
//
// In these cases, we will change the lclVar or lclFld into a lclFld of the appropriate type and
// offset if doing so is legal. The only cases in which this transformation is illegal are if the load
// begins before the local or if the load extends beyond the end of the local (i.e. if the load is
// out-of-bounds w.r.t. the local).
if ((temp != nullptr) && !foldAndReturnTemp)
{
assert(temp->OperIsLocal());
const unsigned lclNum = temp->AsLclVarCommon()->GetLclNum();
LclVarDsc* const varDsc = lvaGetDesc(lclNum);
const var_types tempTyp = temp->TypeGet();
const bool useExactSize = varTypeIsStruct(tempTyp) || (tempTyp == TYP_BLK) || (tempTyp == TYP_LCLBLK);
const unsigned varSize = useExactSize ? varDsc->lvExactSize : genTypeSize(temp);
// Make sure we do not enregister this lclVar.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
// If the size of the load is greater than the size of the lclVar, we cannot fold this access into
// a lclFld: the access represented by an lclFld node must begin at or after the start of the
// lclVar and must not extend beyond the end of the lclVar.
if ((ival1 >= 0) && ((ival1 + genTypeSize(typ)) <= varSize))
{
GenTreeLclFld* lclFld;
// We will turn a GT_LCL_VAR into a GT_LCL_FLD with a gtLclOffs of 'ival1',
// or if we already have a GT_LCL_FLD we will adjust its gtLclOffs by adding 'ival1'.
// Then we change the type of the GT_LCL_FLD to match the original GT_IND type.
//
if (temp->OperGet() == GT_LCL_FLD)
{
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(lclFld->GetLclOffs() + static_cast<unsigned>(ival1));
lclFld->SetFieldSeq(GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeq));
}
else // We have a GT_LCL_VAR.
{
assert(temp->OperGet() == GT_LCL_VAR);
temp->ChangeOper(GT_LCL_FLD); // Note that this makes the gtFieldSeq "NotAField".
lclFld = temp->AsLclFld();
lclFld->SetLclOffs(static_cast<unsigned>(ival1));
if (fieldSeq != nullptr)
{
// If it does represent a field, note that.
lclFld->SetFieldSeq(fieldSeq);
}
}
temp->gtType = tree->gtType;
foldAndReturnTemp = true;
}
}
if (foldAndReturnTemp)
{
assert(temp != nullptr);
assert(temp->TypeGet() == typ);
assert((op1->OperGet() == GT_ADD) || (op1->OperGet() == GT_ADDR));
// Copy the value of GTF_DONT_CSE from the original tree to `temp`: it can be set for
// 'temp' because a GT_ADDR always marks it for its operand.
temp->gtFlags &= ~GTF_DONT_CSE;
temp->gtFlags |= (tree->gtFlags & GTF_DONT_CSE);
if (op1->OperGet() == GT_ADD)
{
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp1); // GT_ADDR
DEBUG_DESTROY_NODE(op1->AsOp()->gtOp2); // GT_CNS_INT
}
DEBUG_DESTROY_NODE(op1); // GT_ADD or GT_ADDR
DEBUG_DESTROY_NODE(tree); // GT_IND
// If the result of the fold is a local var, we may need to perform further adjustments e.g. for
// normalization.
if (temp->OperIs(GT_LCL_VAR))
{
#ifdef DEBUG
// We clear this flag on `temp` because `fgMorphLocalVar` may assert that this bit is clear
// and the node in question must have this bit set (as it has already been morphed).
temp->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
const bool forceRemorph = true;
temp = fgMorphLocalVar(temp, forceRemorph);
#ifdef DEBUG
// We then set this flag on `temp` because `fgMorphLocalVar` may not set it itself, and the
// caller of `fgMorphSmpOp` may assert that this flag is set on `temp` once this function
// returns.
temp->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif // DEBUG
}
return temp;
}
// Only do this optimization when we are in the global optimizer. Doing this after value numbering
// could result in an invalid value number for the newly generated GT_IND node.
if ((op1->OperGet() == GT_COMMA) && fgGlobalMorph)
{
// Perform the transform IND(COMMA(x, ..., z)) == COMMA(x, ..., IND(z)).
// TBD: this transformation is currently necessary for correctness -- it might
// be good to analyze the failures that result if we don't do this, and fix them
// in other ways. Ideally, this should be optional.
GenTree* commaNode = op1;
GenTreeFlags treeFlags = tree->gtFlags;
commaNode->gtType = typ;
commaNode->gtFlags = (treeFlags & ~GTF_REVERSE_OPS); // Bashing the GT_COMMA flags here is
// dangerous, clear the GTF_REVERSE_OPS at
// least.
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
while (commaNode->AsOp()->gtOp2->gtOper == GT_COMMA)
{
commaNode = commaNode->AsOp()->gtOp2;
commaNode->gtType = typ;
commaNode->gtFlags =
(treeFlags & ~GTF_REVERSE_OPS & ~GTF_ASG & ~GTF_CALL); // Bashing the GT_COMMA flags here is
// dangerous, clear the GTF_REVERSE_OPS, GTF_ASG, and GTF_CALL at
// least.
commaNode->gtFlags |= ((commaNode->AsOp()->gtOp1->gtFlags | commaNode->AsOp()->gtOp2->gtFlags) &
(GTF_ASG | GTF_CALL));
#ifdef DEBUG
commaNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
tree = op1;
GenTree* addr = commaNode->AsOp()->gtOp2;
// TODO-1stClassStructs: we often create a struct IND without a handle, fix it.
op1 = gtNewIndir(typ, addr);
// This is very conservative
op1->gtFlags |= treeFlags & ~GTF_ALL_EFFECT & ~GTF_IND_NONFAULTING;
op1->gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
#ifdef DEBUG
op1->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
commaNode->AsOp()->gtOp2 = op1;
commaNode->gtFlags |= (op1->gtFlags & GTF_ALL_EFFECT);
return tree;
}
break;
}
case GT_ADDR:
// Can not remove op1 if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(op1))
{
break;
}
if (op1->OperGet() == GT_IND)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(IND(...)) == (...).
GenTree* addr = op1->AsOp()->gtOp1;
// If tree has a zero field sequence annotation, update the annotation
// on addr node.
FieldSeqNode* zeroFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq))
{
fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq);
}
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
else if (op1->OperGet() == GT_OBJ)
{
// Can not remove a GT_ADDR if it is currently a CSE candidate.
if (gtIsActiveCSE_Candidate(tree))
{
break;
}
// Perform the transform ADDR(OBJ(...)) == (...).
GenTree* addr = op1->AsObj()->Addr();
noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
return addr;
}
else if ((op1->gtOper == GT_COMMA) && !optValnumCSE_phase)
{
// Perform the transform ADDR(COMMA(x, ..., z)) == COMMA(x, ..., ADDR(z)).
// (Be sure to mark "z" as an l-value...)
ArrayStack<GenTree*> commas(getAllocator(CMK_ArrayStack));
for (GenTree* comma = op1; comma != nullptr && comma->gtOper == GT_COMMA; comma = comma->gtGetOp2())
{
commas.Push(comma);
}
GenTree* commaNode = commas.Top();
// The top-level addr might be annotated with a zeroOffset field.
FieldSeqNode* zeroFieldSeq = nullptr;
bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
tree = op1;
commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE;
// If the node we're about to put under a GT_ADDR is an indirection, it
// doesn't need to be materialized, since we only want the addressing mode. Because
// of this, this GT_IND is not a faulting indirection and we don't have to extract it
// as a side effect.
GenTree* commaOp2 = commaNode->AsOp()->gtOp2;
if (commaOp2->OperIsBlk())
{
commaOp2->SetOper(GT_IND);
}
if (commaOp2->gtOper == GT_IND)
{
commaOp2->gtFlags |= GTF_IND_NONFAULTING;
commaOp2->gtFlags &= ~GTF_EXCEPT;
commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT);
}
op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
if (isZeroOffset)
{
// Transfer the annotation to the new GT_ADDR node.
fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq);
}
commaNode->AsOp()->gtOp2 = op1;
// Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
// might give op1 a type different from byref (like, say, native int). So now go back and give
// all the comma nodes the type of op1.
while (!commas.Empty())
{
GenTree* comma = commas.Pop();
comma->gtType = op1->gtType;
#ifdef DEBUG
comma->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
gtUpdateNodeSideEffects(comma);
}
return tree;
}
break;
case GT_COLON:
if (fgGlobalMorph)
{
/* Mark the nodes that are conditionally executed */
fgWalkTreePre(&tree, gtMarkColonCond);
}
/* Since we're doing this postorder we clear this if it got set by a child */
fgRemoveRestOfBlock = false;
break;
case GT_COMMA:
/* Special case: trees that don't produce a value */
if (op2->OperIs(GT_ASG) || (op2->OperGet() == GT_COMMA && op2->TypeGet() == TYP_VOID) || fgIsThrow(op2))
{
typ = tree->gtType = TYP_VOID;
}
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
//
if (!optValnumCSE_phase)
{
// Extract the side effects from the left side of the comma. Since they don't "go" anywhere, this
// is all we need.
GenTree* op1SideEffects = nullptr;
// The addition of "GTF_MAKE_CSE" below prevents us from throwing away (for example)
// hoisted expressions in loops.
gtExtractSideEffList(op1, &op1SideEffects, (GTF_SIDE_EFFECT | GTF_MAKE_CSE));
if (op1SideEffects)
{
// Replace the left hand side with the side effect list.
op1 = op1SideEffects;
tree->AsOp()->gtOp1 = op1SideEffects;
gtUpdateNodeSideEffects(tree);
}
else
{
op2->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op1);
return op2;
}
// If the right operand is just a void nop node, throw it away. Unless this is a
// comma throw, in which case we want the top-level morphing loop to recognize it.
if (op2->IsNothingNode() && op1->TypeIs(TYP_VOID) && !fgIsCommaThrow(tree))
{
op1->gtFlags |= (tree->gtFlags & (GTF_DONT_CSE | GTF_LATE_ARG));
DEBUG_DESTROY_NODE(tree);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
case GT_JTRUE:
/* Special case if fgRemoveRestOfBlock is set to true */
if (fgRemoveRestOfBlock)
{
if (fgIsCommaThrow(op1, true))
{
GenTree* throwNode = op1->AsOp()->gtOp1;
JITDUMP("Removing [%06d] GT_JTRUE as the block now unconditionally throws an exception.\n",
dspTreeID(tree));
DEBUG_DESTROY_NODE(tree);
return throwNode;
}
noway_assert(op1->OperIsCompare());
noway_assert(op1->gtFlags & GTF_EXCEPT);
// We need to keep op1 for the side-effects. Hang it off
// a GT_COMMA node
JITDUMP("Keeping side-effects by bashing [%06d] GT_JTRUE into a GT_COMMA.\n", dspTreeID(tree));
tree->ChangeOper(GT_COMMA);
tree->AsOp()->gtOp2 = op2 = gtNewNothingNode();
// Additionally since we're eliminating the JTRUE
// codegen won't like it if op1 is a RELOP of longs, floats or doubles.
// So we change it into a GT_COMMA as well.
JITDUMP("Also bashing [%06d] (a relop) into a GT_COMMA.\n", dspTreeID(op1));
op1->ChangeOper(GT_COMMA);
op1->gtFlags &= ~GTF_UNSIGNED; // Clear the unsigned flag if it was set on the relop
op1->gtType = op1->AsOp()->gtOp1->gtType;
return tree;
}
break;
case GT_INTRINSIC:
if (tree->AsIntrinsic()->gtIntrinsicName ==
NI_System_Runtime_CompilerServices_RuntimeHelpers_IsKnownConstant)
{
// Should be expanded by the time it reaches CSE phase
assert(!optValnumCSE_phase);
JITDUMP("\nExpanding RuntimeHelpers.IsKnownConstant to ");
if (op1->OperIsConst())
{
// We're lucky to catch a constant here, while the importer was not
JITDUMP("true\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(1);
}
else
{
GenTree* op1SideEffects = nullptr;
gtExtractSideEffList(op1, &op1SideEffects, GTF_ALL_EFFECT);
if (op1SideEffects != nullptr)
{
DEBUG_DESTROY_NODE(tree);
// Keep side-effects of op1
tree = gtNewOperNode(GT_COMMA, TYP_INT, op1SideEffects, gtNewIconNode(0));
JITDUMP("false with side effects:\n")
DISPTREE(tree);
}
else
{
JITDUMP("false\n");
DEBUG_DESTROY_NODE(tree, op1);
tree = gtNewIconNode(0);
}
}
INDEBUG(tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return tree;
}
break;
default:
break;
}
assert(oper == tree->gtOper);
// Propagate comma throws.
// If we are in the Valuenum CSE phase then don't morph away anything as these
// nodes may have CSE defs/uses in them.
if (fgGlobalMorph && (oper != GT_ASG) && (oper != GT_COLON))
{
if ((op1 != nullptr) && fgIsCommaThrow(op1, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op1->AsOp(), GTF_EMPTY);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
if ((op2 != nullptr) && fgIsCommaThrow(op2, true))
{
GenTree* propagatedThrow = fgPropagateCommaThrow(tree, op2->AsOp(), op1->gtFlags & GTF_ALL_EFFECT);
if (propagatedThrow != nullptr)
{
return propagatedThrow;
}
}
}
/*-------------------------------------------------------------------------
* Optional morphing is done if tree transformations are permitted
*/
if ((opts.compFlags & CLFLG_TREETRANS) == 0)
{
return tree;
}
tree = fgMorphSmpOpOptional(tree->AsOp());
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeCast: Optimizes the supplied GT_CAST tree.
//
// Tries to get rid of the cast, its operand, or the GTF_OVERFLOW flag;
// calls "optNarrowTree". Called in post-order by "fgMorphSmpOp".
//
// Arguments:
// cast - the cast tree to optimize
//
// Return Value:
// The optimized tree (that can have any shape).
//
GenTree* Compiler::fgOptimizeCast(GenTreeCast* cast)
{
GenTree* src = cast->CastOp();
if (gtIsActiveCSE_Candidate(cast) || gtIsActiveCSE_Candidate(src))
{
return cast;
}
// See if we can discard the cast.
if (varTypeIsIntegral(cast) && varTypeIsIntegral(src))
{
IntegralRange srcRange = IntegralRange::ForNode(src, this);
IntegralRange noOvfRange = IntegralRange::ForCastInput(cast);
if (noOvfRange.Contains(srcRange))
{
// Casting between same-sized types is a no-op,
// given we have proven this cast cannot overflow.
if (genActualType(cast) == genActualType(src))
{
return src;
}
cast->ClearOverflow();
cast->SetAllEffectsFlags(src);
// Try and see if we can make this cast into a cheaper zero-extending version.
if (genActualTypeIsInt(src) && cast->TypeIs(TYP_LONG) && srcRange.IsPositive())
{
cast->SetUnsigned();
}
}
// For checked casts, we're done.
if (cast->gtOverflow())
{
return cast;
}
var_types castToType = cast->CastToType();
// For indir-like nodes, we may be able to change their type to satisfy (and discard) the cast.
if (varTypeIsSmall(castToType) && (genTypeSize(castToType) == genTypeSize(src)) &&
src->OperIs(GT_IND, GT_CLS_VAR, GT_LCL_FLD))
{
// We're changing the type here so we need to update the VN;
// in other cases we discard the cast without modifying src
// so the VN doesn't change.
src->ChangeType(castToType);
src->SetVNsFromNode(cast);
return src;
}
// Try to narrow the operand of the cast and discard the cast.
if (opts.OptEnabled(CLFLG_TREETRANS) && (genTypeSize(src) > genTypeSize(castToType)) &&
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, false))
{
optNarrowTree(src, src->TypeGet(), castToType, cast->gtVNPair, true);
// "optNarrowTree" may leave a dead cast behind.
if (src->OperIs(GT_CAST) && (src->AsCast()->CastToType() == genActualType(src->AsCast()->CastOp())))
{
src = src->AsCast()->CastOp();
}
return src;
}
// Check for two consecutive casts, we may be able to discard the intermediate one.
if (opts.OptimizationEnabled() && src->OperIs(GT_CAST) && !src->gtOverflow())
{
var_types dstCastToType = castToType;
var_types srcCastToType = src->AsCast()->CastToType();
// CAST(ubyte <- CAST(short <- X)): CAST(ubyte <- X).
// CAST(ushort <- CAST(short <- X)): CAST(ushort <- X).
if (varTypeIsSmall(srcCastToType) && (genTypeSize(dstCastToType) <= genTypeSize(srcCastToType)))
{
cast->CastOp() = src->AsCast()->CastOp();
DEBUG_DESTROY_NODE(src);
}
}
}
return cast;
}
//------------------------------------------------------------------------
// fgOptimizeEqualityComparisonWithConst: optimizes various EQ/NE(OP, CONST) patterns.
//
// Arguments:
// cmp - The GT_NE/GT_EQ tree the second operand of which is an integral constant
//
// Return Value:
// The optimized tree, "cmp" in case no optimizations were done.
// Currently only returns relop trees.
//
GenTree* Compiler::fgOptimizeEqualityComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_EQ, GT_NE));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
// Check for "(expr +/- icon1) ==/!= (non-zero-icon2)".
if (op2->IsCnsIntOrI() && (op2->IconValue() != 0))
{
// Since this can occur repeatedly we use a while loop.
while (op1->OperIs(GT_ADD, GT_SUB) && op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op1->TypeIs(TYP_INT) &&
!op1->gtOverflow())
{
// Got it; change "x + icon1 == icon2" to "x == icon2 - icon1".
ssize_t op1Value = op1->AsOp()->gtGetOp2()->AsIntCon()->IconValue();
ssize_t op2Value = op2->IconValue();
if (op1->OperIs(GT_ADD))
{
op2Value -= op1Value;
}
else
{
op2Value += op1Value;
}
op1 = op1->AsOp()->gtGetOp1();
op2->SetIconValue(static_cast<int32_t>(op2Value));
}
cmp->gtOp1 = op1;
fgUpdateConstTreeValueNumber(op2);
}
// Here we look for the following tree
//
// EQ/NE
// / \.
// op1 CNS 0/1
//
if (op2->IsIntegralConst(0) || op2->IsIntegralConst(1))
{
ssize_t op2Value = static_cast<ssize_t>(op2->IntegralValue());
if (op1->OperIsCompare())
{
// Here we look for the following tree
//
// EQ/NE -> RELOP/!RELOP
// / \ / \.
// RELOP CNS 0/1
// / \.
//
// Note that we will remove/destroy the EQ/NE node and move
// the RELOP up into its location.
// Here we reverse the RELOP if necessary.
bool reverse = ((op2Value == 0) == (cmp->OperIs(GT_EQ)));
if (reverse)
{
gtReverseCond(op1);
}
noway_assert((op1->gtFlags & GTF_RELOP_JMP_USED) == 0);
op1->gtFlags |= cmp->gtFlags & (GTF_RELOP_JMP_USED | GTF_DONT_CSE);
op1->SetVNsFromNode(cmp);
DEBUG_DESTROY_NODE(cmp);
return op1;
}
//
// Now we check for a compare with the result of an '&' operator
//
// Here we look for the following transformation:
//
// EQ/NE EQ/NE
// / \ / \.
// AND CNS 0/1 -> AND CNS 0
// / \ / \.
// RSZ/RSH CNS 1 x CNS (1 << y)
// / \.
// x CNS_INT +y
if (fgGlobalMorph && op1->OperIs(GT_AND) && op1->AsOp()->gtGetOp1()->OperIs(GT_RSZ, GT_RSH))
{
GenTreeOp* andOp = op1->AsOp();
GenTreeOp* rshiftOp = andOp->gtGetOp1()->AsOp();
if (!rshiftOp->gtGetOp2()->IsCnsIntOrI())
{
goto SKIP;
}
ssize_t shiftAmount = rshiftOp->gtGetOp2()->AsIntCon()->IconValue();
if (shiftAmount < 0)
{
goto SKIP;
}
if (!andOp->gtGetOp2()->IsIntegralConst(1))
{
goto SKIP;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if (andOp->TypeIs(TYP_INT))
{
if (shiftAmount > 31)
{
goto SKIP;
}
andMask->SetIconValue(static_cast<int32_t>(1 << shiftAmount));
// Reverse the condition if necessary.
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetIconValue(0);
}
}
else if (andOp->TypeIs(TYP_LONG))
{
if (shiftAmount > 63)
{
goto SKIP;
}
andMask->SetLngValue(1ll << shiftAmount);
// Reverse the cond if necessary
if (op2Value == 1)
{
gtReverseCond(cmp);
op2->SetLngValue(0);
}
}
andOp->gtOp1 = rshiftOp->gtGetOp1();
DEBUG_DESTROY_NODE(rshiftOp->gtGetOp2());
DEBUG_DESTROY_NODE(rshiftOp);
}
}
SKIP:
// Now check for compares with small constant longs that can be cast to int.
// Note that we filter out negative values here so that the transformations
// below are correct. E. g. "EQ(-1L, CAST_UN(int))" is always "false", but were
// we to make it into "EQ(-1, int)", "true" becomes possible for negative inputs.
if (!op2->TypeIs(TYP_LONG) || ((op2->LngValue() >> 31) != 0))
{
return cmp;
}
if (!op1->OperIs(GT_AND))
{
// Another interesting case: cast from int.
if (op1->OperIs(GT_CAST) && op1->AsCast()->CastOp()->TypeIs(TYP_INT) && !op1->gtOverflow())
{
// Simply make this into an integer comparison.
cmp->gtOp1 = op1->AsCast()->CastOp();
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
// Now we perform the following optimization:
// EQ/NE(AND(OP long, CNS_LNG), CNS_LNG) =>
// EQ/NE(AND(CAST(int <- OP), CNS_INT), CNS_INT)
// when the constants are sufficiently small.
// This transform cannot preserve VNs.
if (fgGlobalMorph)
{
assert(op1->TypeIs(TYP_LONG) && op1->OperIs(GT_AND));
// Is the result of the mask effectively an INT?
GenTreeOp* andOp = op1->AsOp();
if (!andOp->gtGetOp2()->OperIs(GT_CNS_NATIVELONG))
{
return cmp;
}
GenTreeIntConCommon* andMask = andOp->gtGetOp2()->AsIntConCommon();
if ((andMask->LngValue() >> 32) != 0)
{
return cmp;
}
// Now we narrow the first operand of AND to int.
if (optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), false))
{
optNarrowTree(andOp->gtGetOp1(), TYP_LONG, TYP_INT, ValueNumPair(), true);
}
else
{
andOp->gtOp1 = gtNewCastNode(TYP_INT, andOp->gtGetOp1(), false, TYP_INT);
}
assert(andMask == andOp->gtGetOp2());
// Now replace the mask node.
andMask->BashToConst(static_cast<int32_t>(andMask->LngValue()));
// Now change the type of the AND node.
andOp->ChangeType(TYP_INT);
// Finally we replace the comparand.
op2->BashToConst(static_cast<int32_t>(op2->LngValue()));
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithConst: optimizes a comparison operation.
//
// Recognizes comparisons against various constant operands and morphs
// them, if possible, into comparisons against zero.
//
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// The "cmp" tree, possibly with a modified oper.
// The second operand's constant value may be modified as well.
//
// Assumptions:
// The operands have been swapped so that any constants are on the right.
// The second operand is an integral constant.
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithConst(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(cmp->gtGetOp2()->IsIntegralConst());
assert(!gtIsActiveCSE_Candidate(cmp->gtGetOp2()));
GenTree* op1 = cmp->gtGetOp1();
GenTreeIntConCommon* op2 = cmp->gtGetOp2()->AsIntConCommon();
assert(genActualType(op1) == genActualType(op2));
genTreeOps oper = cmp->OperGet();
int64_t op2Value = op2->IntegralValue();
if (op2Value == 1)
{
// Check for "expr >= 1".
if (oper == GT_GE)
{
// Change to "expr != 0" for unsigned and "expr > 0" for signed.
oper = cmp->IsUnsigned() ? GT_NE : GT_GT;
}
// Check for "expr < 1".
else if (oper == GT_LT)
{
// Change to "expr == 0" for unsigned and "expr <= 0" for signed.
oper = cmp->IsUnsigned() ? GT_EQ : GT_LE;
}
}
// Check for "expr relop -1".
else if (!cmp->IsUnsigned() && (op2Value == -1))
{
// Check for "expr <= -1".
if (oper == GT_LE)
{
// Change to "expr < 0".
oper = GT_LT;
}
// Check for "expr > -1".
else if (oper == GT_GT)
{
// Change to "expr >= 0".
oper = GT_GE;
}
}
else if (cmp->IsUnsigned())
{
if ((oper == GT_LE) || (oper == GT_GT))
{
if (op2Value == 0)
{
// IL doesn't have a cne instruction so compilers use cgt.un instead. The JIT
// recognizes certain patterns that involve GT_NE (e.g. (x & 4) != 0) and fails
// if GT_GT is used instead. Transform (x GT_GT.unsigned 0) into (x GT_NE 0)
// and (x GT_LE.unsigned 0) into (x GT_EQ 0). The latter case is rare; it sometimes
// occurs as a result of branch inversion.
oper = (oper == GT_LE) ? GT_EQ : GT_NE;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
// LE_UN/GT_UN(expr, int/long.MaxValue) => GE/LT(expr, 0).
else if (((op1->TypeIs(TYP_LONG) && (op2Value == INT64_MAX))) ||
((genActualType(op1) == TYP_INT) && (op2Value == INT32_MAX)))
{
oper = (oper == GT_LE) ? GT_GE : GT_LT;
cmp->gtFlags &= ~GTF_UNSIGNED;
}
}
}
if (!cmp->OperIs(oper))
{
// Keep the old ValueNumber for 'tree' as the new expr
// will still compute the same value as before.
cmp->SetOper(oper, GenTree::PRESERVE_VN);
op2->SetIntegralValue(0);
fgUpdateConstTreeValueNumber(op2);
}
return cmp;
}
#ifdef FEATURE_HW_INTRINSICS
//------------------------------------------------------------------------
// fgOptimizeHWIntrinsic: optimize a HW intrinsic node
//
// Arguments:
// node - HWIntrinsic node to examine
//
// Returns:
// The original node if no optimization happened or if tree bashing occurred.
// An alternative tree if an optimization happened.
//
// Notes:
// Checks for HWIntrinsic nodes: Vector64.Create/Vector128.Create/Vector256.Create,
// and if the call is one of these, attempt to optimize.
// This is post-order, meaning that it will not morph the children.
//
GenTree* Compiler::fgOptimizeHWIntrinsic(GenTreeHWIntrinsic* node)
{
assert(!optValnumCSE_phase);
if (opts.OptimizationDisabled())
{
return node;
}
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
#endif
{
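// If every operand of the Create is a constant zero, we can replace the
// whole node with the cheaper get_Zero intrinsic.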
bool hwAllArgsAreConstZero = true;
for (GenTree* arg : node->Operands())
{
if (!arg->IsIntegralConst(0) && !arg->IsFloatPositiveZero())
{
hwAllArgsAreConstZero = false;
break;
}
}
if (hwAllArgsAreConstZero)
{
switch (node->GetHWIntrinsicId())
{
case NI_Vector128_Create:
{
node->ResetHWIntrinsicId(NI_Vector128_get_Zero);
break;
}
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
{
node->ResetHWIntrinsicId(NI_Vector256_get_Zero);
break;
}
#elif defined(TARGET_ARM64)
case NI_Vector64_Create:
{
node->ResetHWIntrinsicId(NI_Vector64_get_Zero);
break;
}
#endif
default:
unreached();
}
}
break;
}
default:
break;
}
return node;
}
#endif
//------------------------------------------------------------------------
// fgOptimizeCommutativeArithmetic: Optimizes commutative operations.
//
// Arguments:
// tree - the unchecked GT_ADD/GT_MUL/GT_OR/GT_XOR/GT_AND tree to optimize.
//
// Return Value:
// The optimized tree that can have any shape.
//
GenTree* Compiler::fgOptimizeCommutativeArithmetic(GenTreeOp* tree)
{
assert(tree->OperIs(GT_ADD, GT_MUL, GT_OR, GT_XOR, GT_AND));
assert(!tree->gtOverflowEx());
// Commute constants to the right.
if (tree->gtGetOp1()->OperIsConst() && !tree->gtGetOp1()->TypeIs(TYP_REF))
{
// TODO-Review: We used to assert here that "(!op2->OperIsConst() || !opts.OptEnabled(CLFLG_CONSTANTFOLD))".
// This may indicate a missed "remorph". Task is to re-enable this assertion and investigate.
std::swap(tree->gtOp1, tree->gtOp2);
}
if (fgOperIsBitwiseRotationRoot(tree->OperGet()))
{
GenTree* rotationTree = fgRecognizeAndMorphBitwiseRotation(tree);
if (rotationTree != nullptr)
{
return rotationTree;
}
}
if (fgGlobalMorph && tree->OperIs(GT_AND, GT_OR, GT_XOR))
{
GenTree* castTree = fgMorphCastedBitwiseOp(tree->AsOp());
if (castTree != nullptr)
{
return castTree;
}
}
if (varTypeIsIntegralOrI(tree))
{
genTreeOps oldTreeOper = tree->OperGet();
GenTreeOp* optimizedTree = fgMorphCommutative(tree->AsOp());
if (optimizedTree != nullptr)
{
if (!optimizedTree->OperIs(oldTreeOper))
{
// "optimizedTree" could end up being a COMMA.
return optimizedTree;
}
tree = optimizedTree;
}
}
if (!optValnumCSE_phase)
{
GenTree* optimizedTree = nullptr;
if (tree->OperIs(GT_ADD))
{
optimizedTree = fgOptimizeAddition(tree);
}
else if (tree->OperIs(GT_MUL))
{
optimizedTree = fgOptimizeMultiply(tree);
}
else if (tree->OperIs(GT_AND))
{
optimizedTree = fgOptimizeBitwiseAnd(tree);
}
else if (tree->OperIs(GT_XOR))
{
optimizedTree = fgOptimizeBitwiseXor(tree);
}
if (optimizedTree != nullptr)
{
return optimizedTree;
}
}
return tree;
}
//------------------------------------------------------------------------
// fgOptimizeAddition: optimizes addition.
//
// Arguments:
// add - the unchecked GT_ADD tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeAddition(GenTreeOp* add)
{
assert(add->OperIs(GT_ADD) && !add->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = add->gtGetOp1();
GenTree* op2 = add->gtGetOp2();
// Fold "((x + icon1) + (y + icon2))" to ((x + y) + (icon1 + icon2))".
// Be careful not to create a byref pointer that may point outside of the ref object.
// Only do this in global morph as we don't recompute the VN for "(x + y)", the new "op2".
if (op1->OperIs(GT_ADD) && op2->OperIs(GT_ADD) && !op1->gtOverflow() && !op2->gtOverflow() &&
op1->AsOp()->gtGetOp2()->IsCnsIntOrI() && op2->AsOp()->gtGetOp2()->IsCnsIntOrI() &&
!varTypeIsGC(op1->AsOp()->gtGetOp1()) && !varTypeIsGC(op2->AsOp()->gtGetOp1()) && fgGlobalMorph)
{
GenTreeOp* addOne = op1->AsOp();
GenTreeOp* addTwo = op2->AsOp();
GenTreeIntCon* constOne = addOne->gtGetOp2()->AsIntCon();
GenTreeIntCon* constTwo = addTwo->gtGetOp2()->AsIntCon();
addOne->gtOp2 = addTwo->gtGetOp1();
addOne->SetAllEffectsFlags(addOne->gtGetOp1(), addOne->gtGetOp2());
DEBUG_DESTROY_NODE(addTwo);
constOne->SetValueTruncating(constOne->IconValue() + constTwo->IconValue());
op2 = constOne;
add->gtOp2 = constOne;
DEBUG_DESTROY_NODE(constTwo);
}
// Fold (x + 0) - given it won't change the tree type to TYP_REF.
// TODO-Bug: this code will lose the GC-ness of a tree like "native int + byref(0)".
if (op2->IsIntegralConst(0) && ((add->TypeGet() == op1->TypeGet()) || !op1->TypeIs(TYP_REF)))
{
if (op2->IsCnsIntOrI() && varTypeIsI(op1))
{
fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq);
}
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(add);
return op1;
}
// Note that these transformations are legal for floating-point ADDs as well.
if (opts.OptimizationEnabled())
{
// -a + b => b - a
// ADD(NEG(a), b) => SUB(b, a)
// Do not do this if "op2" is constant for canonicalization purposes.
if (op1->OperIs(GT_NEG) && !op2->OperIs(GT_NEG) && !op2->IsIntegralConst() && gtCanSwapOrder(op1, op2))
{
add->SetOper(GT_SUB);
add->gtOp1 = op2;
add->gtOp2 = op1->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op1);
return add;
}
// a + -b => a - b
// ADD(a, NEG(b)) => SUB(a, b)
if (!op1->OperIs(GT_NEG) && op2->OperIs(GT_NEG))
{
add->SetOper(GT_SUB);
add->gtOp2 = op2->AsOp()->gtGetOp1();
DEBUG_DESTROY_NODE(op2);
return add;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeMultiply: optimizes multiplication.
//
// Arguments:
// mul - the unchecked TYP_I_IMPL/TYP_INT GT_MUL tree to optimize.
//
// Return Value:
// The optimized tree, that can have any shape, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeMultiply(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(varTypeIsIntOrI(mul) || varTypeIsFloating(mul));
assert(!mul->gtOverflow());
assert(!optValnumCSE_phase);
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
assert(mul->TypeGet() == genActualType(op1));
assert(mul->TypeGet() == genActualType(op2));
if (opts.OptimizationEnabled() && op2->IsCnsFltOrDbl())
{
double multiplierValue = op2->AsDblCon()->gtDconVal;
if (multiplierValue == 1.0)
{
// Fold "x * 1.0" to "x".
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Fold "x * 2.0" to "x + x".
// If op1 is not a local we will have to introduce a temporary via GT_COMMA.
// Unfortunately, it's not optHoistLoopCode-friendly (yet), so we'll only do
// this for locals / after hoisting has run (when rationalization remorphs
// math INTRINSICs into calls...).
if ((multiplierValue == 2.0) && (op1->IsLocal() || (fgOrder == FGOrderLinear)))
{
op2 = fgMakeMultiUse(&op1);
GenTree* add = gtNewOperNode(GT_ADD, mul->TypeGet(), op1, op2);
INDEBUG(add->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
return add;
}
}
if (op2->IsIntegralConst())
{
ssize_t mult = op2->AsIntConCommon()->IconValue();
if (mult == 0)
{
// We may be able to throw away op1 (unless it has side-effects)
if ((op1->gtFlags & GTF_SIDE_EFFECT) == 0)
{
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(mul);
return op2; // Just return the "0" node
}
// We need to keep op1 for the side-effects. Hang it off a GT_COMMA node.
mul->ChangeOper(GT_COMMA, GenTree::PRESERVE_VN);
return mul;
}
#ifdef TARGET_XARCH
// Should we try to replace integer multiplication with lea/add/shift sequences?
bool mulShiftOpt = compCodeOpt() != SMALL_CODE;
#else // !TARGET_XARCH
bool mulShiftOpt = false;
#endif // !TARGET_XARCH
size_t abs_mult = (mult >= 0) ? mult : -mult;
size_t lowestBit = genFindLowestBit(abs_mult);
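// Illustrative example: mult == 12 gives abs_mult == 12 and lowestBit == 4,
// so below we rewrite "x * 12" as "(x * 3) << 2".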
bool changeToShift = false;
// is it a power of two? (positive or negative)
if (abs_mult == lowestBit)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
if (abs_mult == 1)
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(mul);
return op1;
}
// Change the multiplication into a shift by log2(val) bits.
op2->AsIntConCommon()->SetIconValue(genLog2(abs_mult));
changeToShift = true;
}
else if (mulShiftOpt && (lowestBit > 1) && jitIsScaleIndexMul(lowestBit))
{
int shift = genLog2(lowestBit);
ssize_t factor = abs_mult >> shift;
if (factor == 3 || factor == 5 || factor == 9)
{
// if negative negate (min-int does not need negation)
if (mult < 0 && mult != SSIZE_T_MIN)
{
op1 = gtNewOperNode(GT_NEG, genActualType(op1), op1);
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
}
// change the multiplication into a smaller multiplication (by 3, 5 or 9) and a shift
op1 = gtNewOperNode(GT_MUL, mul->TypeGet(), op1, gtNewIconNode(factor, mul->TypeGet()));
mul->gtOp1 = op1;
fgMorphTreeDone(op1);
op2->AsIntConCommon()->SetIconValue(shift);
changeToShift = true;
}
}
if (changeToShift)
{
fgUpdateConstTreeValueNumber(op2);
mul->ChangeOper(GT_LSH, GenTree::PRESERVE_VN);
return mul;
}
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseAnd: optimizes the "and" operation.
//
// Arguments:
// andOp - the GT_AND tree to optimize.
//
// Return Value:
// The optimized tree, currently always a relop, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeBitwiseAnd(GenTreeOp* andOp)
{
assert(andOp->OperIs(GT_AND));
assert(!optValnumCSE_phase);
GenTree* op1 = andOp->gtGetOp1();
GenTree* op2 = andOp->gtGetOp2();
// Fold "cmp & 1" to just "cmp".
if (andOp->TypeIs(TYP_INT) && op1->OperIsCompare() && op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(andOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgOptimizeRelationalComparisonWithCasts: Recognizes comparisons against
// various cast operands and tries to remove them. E.g.:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CNS_INT long
//
// to:
//
// * GE_un int
// +--* X int
// \--* CNS_INT int
//
// same for:
//
// * GE int
// +--* CAST long <- ulong <- uint
// | \--* X int
// \--* CAST long <- [u]long <- int
// \--* ARR_LEN int
//
// These patterns quite often show up along with index checks
//
// Arguments:
// cmp - the GT_LE/GT_LT/GT_GE/GT_GT tree to morph.
//
// Return Value:
// Returns the same tree where operands might have narrower types
//
// Notes:
// TODO-Casts: consider unifying this function with "optNarrowTree"
//
GenTree* Compiler::fgOptimizeRelationalComparisonWithCasts(GenTreeOp* cmp)
{
assert(cmp->OperIs(GT_LE, GT_LT, GT_GE, GT_GT));
assert(!optValnumCSE_phase);
GenTree* op1 = cmp->gtGetOp1();
GenTree* op2 = cmp->gtGetOp2();
// Caller is expected to call this function only if we have CAST nodes
assert(op1->OperIs(GT_CAST) || op2->OperIs(GT_CAST));
if (!op1->TypeIs(TYP_LONG))
{
// We can extend this logic to handle small types as well, but currently it's done mostly to
// assist range check elimination
return cmp;
}
GenTree* castOp;
GenTree* knownPositiveOp;
bool knownPositiveIsOp2;
if (op2->IsIntegralConst() || ((op2->OperIs(GT_CAST) && op2->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))))
{
// op2 is either a LONG constant or (T)ARR_LENGTH
knownPositiveIsOp2 = true;
castOp = cmp->gtGetOp1();
knownPositiveOp = cmp->gtGetOp2();
}
else
{
// op1 is either a LONG constant (yes, it's pretty normal for relops)
// or (T)ARR_LENGTH
castOp = cmp->gtGetOp2();
knownPositiveOp = cmp->gtGetOp1();
knownPositiveIsOp2 = false;
}
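// The cast we try to remove must be a non-overflow-checked, zero-extending
// (unsigned) widening of an INT operand to LONG.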
if (castOp->OperIs(GT_CAST) && varTypeIsLong(castOp->CastToType()) && castOp->AsCast()->CastOp()->TypeIs(TYP_INT) &&
castOp->IsUnsigned() && !castOp->gtOverflow())
{
bool knownPositiveFitsIntoU32 = false;
if (knownPositiveOp->IsIntegralConst() && FitsIn<UINT32>(knownPositiveOp->AsIntConCommon()->IntegralValue()))
{
// BTW, we can fold the whole condition if op2 doesn't fit into UINT_MAX.
knownPositiveFitsIntoU32 = true;
}
else if (knownPositiveOp->OperIs(GT_CAST) && varTypeIsLong(knownPositiveOp->CastToType()) &&
knownPositiveOp->AsCast()->CastOp()->OperIs(GT_ARR_LENGTH))
{
knownPositiveFitsIntoU32 = true;
// TODO-Casts: recognize Span.Length here as well.
}
if (!knownPositiveFitsIntoU32)
{
return cmp;
}
JITDUMP("Removing redundant cast(s) for:\n")
DISPTREE(cmp)
JITDUMP("\n\nto:\n\n")
cmp->SetUnsigned();
// Drop cast from castOp
if (knownPositiveIsOp2)
{
cmp->gtOp1 = castOp->AsCast()->CastOp();
}
else
{
cmp->gtOp2 = castOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(castOp);
if (knownPositiveOp->OperIs(GT_CAST))
{
// Drop cast from knownPositiveOp too
if (knownPositiveIsOp2)
{
cmp->gtOp2 = knownPositiveOp->AsCast()->CastOp();
}
else
{
cmp->gtOp1 = knownPositiveOp->AsCast()->CastOp();
}
DEBUG_DESTROY_NODE(knownPositiveOp);
}
else
{
// Change type for constant from LONG to INT
knownPositiveOp->ChangeType(TYP_INT);
#ifndef TARGET_64BIT
assert(knownPositiveOp->OperIs(GT_CNS_LNG));
knownPositiveOp->BashToConst(static_cast<int>(knownPositiveOp->AsIntConCommon()->IntegralValue()));
#endif
fgUpdateConstTreeValueNumber(knownPositiveOp);
}
DISPTREE(cmp)
JITDUMP("\n")
}
return cmp;
}
//------------------------------------------------------------------------
// fgOptimizeBitwiseXor: optimizes the "xor" operation.
//
// Arguments:
// xorOp - the GT_XOR tree to optimize.
//
// Return Value:
// The optimized tree, in case any transformations
// were performed. Otherwise, "nullptr", guaranteeing no state change.
//
GenTree* Compiler::fgOptimizeBitwiseXor(GenTreeOp* xorOp)
{
assert(xorOp->OperIs(GT_XOR));
assert(!optValnumCSE_phase);
GenTree* op1 = xorOp->gtGetOp1();
GenTree* op2 = xorOp->gtGetOp2();
if (op2->IsIntegralConst(0))
{
/* "x ^ 0" is "x" */
DEBUG_DESTROY_NODE(xorOp, op2);
return op1;
}
else if (op2->IsIntegralConst(-1))
{
/* "x ^ -1" is "~x" */
xorOp->ChangeOper(GT_NOT);
xorOp->gtOp2 = nullptr;
DEBUG_DESTROY_NODE(op2);
return xorOp;
}
else if (op2->IsIntegralConst(1) && op1->OperIsCompare())
{
/* "binaryVal ^ 1" is "!binaryVal" */
gtReverseCond(op1);
DEBUG_DESTROY_NODE(op2);
DEBUG_DESTROY_NODE(xorOp);
return op1;
}
return nullptr;
}
//------------------------------------------------------------------------
// fgPropagateCommaThrow: propagate a "comma throw" up the tree.
//
// "Comma throws" in the compiler represent the canonical form of an always
// throwing expression. They have the shape of COMMA(THROW, ZERO), to satisfy
// the semantic that the original expression produced some value and are
// generated by "gtFoldExprConst" when it encounters checked arithmetic that
// will determinably overflow.
//
// In the global morphing phase, "comma throws" are "propagated" up the tree,
// in post-order, to eliminate nodes that will never execute. This method,
// called by "fgMorphSmpOp", encapsulates this optimization.
//
// Arguments:
// parent - the node currently being processed.
// commaThrow - the comma throw in question, "parent"'s operand.
// precedingSideEffects - side effects of nodes preceding "comma" in execution order.
//
// Return Value:
// If "parent" is to be replaced with a comma throw, i. e. the propagation was successful,
// the new "parent", otherwise "nullptr", guaranteeing no state change, with one exception:
// the "fgRemoveRestOfBlock" "global" may be set. Note that the new returned tree does not
// have to be a "comma throw", it can be "bare" throw call if the "parent" node did not
// produce any value.
//
// Notes:
// "Comma throws" are very rare.
//
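//    For example (illustrative only), given "ADD(COMMA(THROW, 0), y)" with no side effects
//    preceding the comma in execution order, the whole ADD can be replaced by the comma
//    throw; if the parent produces no value (TYP_VOID), the bare THROW call is used instead.
//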
GenTree* Compiler::fgPropagateCommaThrow(GenTree* parent, GenTreeOp* commaThrow, GenTreeFlags precedingSideEffects)
{
// Comma throw propagation does not preserve VNs, and deletes nodes.
assert(fgGlobalMorph);
assert(fgIsCommaThrow(commaThrow));
if ((commaThrow->gtFlags & GTF_COLON_COND) == 0)
{
fgRemoveRestOfBlock = true;
}
if ((precedingSideEffects & GTF_ALL_EFFECT) == 0)
{
if (parent->TypeIs(TYP_VOID))
{
// Return the throw node as the new tree.
return commaThrow->gtGetOp1();
}
// Fix up the COMMA's type if needed.
if (genActualType(parent) != genActualType(commaThrow))
{
commaThrow->gtGetOp2()->BashToZeroConst(genActualType(parent));
commaThrow->ChangeType(genActualType(parent));
}
return commaThrow;
}
return nullptr;
}
//----------------------------------------------------------------------------------------------
// fgMorphRetInd: Try to get rid of extra IND(ADDR()) pairs in a return tree.
//
// Arguments:
// node - The return node that uses an indirection.
//
// Return Value:
//    The original op1 of the return if no optimization was performed, or the new optimized op1 otherwise.
//
GenTree* Compiler::fgMorphRetInd(GenTreeUnOp* ret)
{
assert(ret->OperIs(GT_RETURN));
assert(ret->gtGetOp1()->OperIs(GT_IND, GT_BLK, GT_OBJ));
GenTreeIndir* ind = ret->gtGetOp1()->AsIndir();
GenTree* addr = ind->Addr();
if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR))
{
// If struct promotion was undone, adjust the annotations
if (fgGlobalMorph && fgMorphImplicitByRefArgs(addr))
{
return ind;
}
// If `return` retypes LCL_VAR as a smaller struct it should not set `doNotEnregister` on that
// LclVar.
// Example: in `Vector128:AsVector2` we have RETURN SIMD8(OBJ SIMD8(ADDR byref(LCL_VAR SIMD16))).
GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar();
if (!lvaIsImplicitByRefLocal(lclVar->GetLclNum()))
{
assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(ind));
unsigned indSize;
if (ind->OperIs(GT_IND))
{
indSize = genTypeSize(ind);
}
else
{
indSize = ind->AsBlk()->GetLayout()->GetSize();
}
LclVarDsc* varDsc = lvaGetDesc(lclVar);
unsigned lclVarSize;
if (!lclVar->TypeIs(TYP_STRUCT))
{
lclVarSize = genTypeSize(varDsc->TypeGet());
}
else
{
lclVarSize = varDsc->lvExactSize;
}
// TODO: change conditions in `canFold` to `indSize <= lclVarSize`, but currently do not support `BITCAST
// int<-SIMD16` etc.
assert((indSize <= lclVarSize) || varDsc->lvDoNotEnregister);
#if defined(TARGET_64BIT)
bool canFold = (indSize == lclVarSize);
#else // !TARGET_64BIT
// TODO: improve 32 bit targets handling for LONG returns if necessary, nowadays we do not support `BITCAST
// long<->double` there.
bool canFold = (indSize == lclVarSize) && (lclVarSize <= REGSIZE_BYTES);
#endif
// TODO: support `genReturnBB != nullptr`, it requires #11413 to avoid `Incompatible types for
// gtNewTempAssign`.
if (canFold && (genReturnBB == nullptr))
{
// Fold (TYPE1)*(&(TYPE2)x) even if types do not match, lowering will handle it.
// Getting rid of this IND(ADDR()) pair allows to keep lclVar as not address taken
// and enregister it.
DEBUG_DESTROY_NODE(ind);
DEBUG_DESTROY_NODE(addr);
ret->gtOp1 = lclVar;
// We use GTF_DONT_CSE as an "is under GT_ADDR" check. We can
// get rid of it now since the GT_RETURN node should never have
// its address taken.
assert((ret->gtFlags & GTF_DONT_CSE) == 0);
lclVar->gtFlags &= ~GTF_DONT_CSE;
return lclVar;
}
else if (!varDsc->lvDoNotEnregister)
{
lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DoNotEnregisterReason::BlockOpRet));
}
}
}
return ind;
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
GenTree* Compiler::fgMorphSmpOpOptional(GenTreeOp* tree)
{
genTreeOps oper = tree->gtOper;
GenTree* op1 = tree->gtOp1;
GenTree* op2 = tree->gtOp2;
var_types typ = tree->TypeGet();
if (fgGlobalMorph && GenTree::OperIsCommutative(oper))
{
/* Swap the operands so that the more expensive one is 'op1' */
if (tree->gtFlags & GTF_REVERSE_OPS)
{
tree->gtOp1 = op2;
tree->gtOp2 = op1;
op2 = op1;
op1 = tree->gtOp1;
tree->gtFlags &= ~GTF_REVERSE_OPS;
}
if (oper == op2->gtOper)
{
/* Reorder nested operators at the same precedence level to be
left-recursive. For example, change "(a+(b+c))" to the
equivalent expression "((a+b)+c)".
*/
/* Things are handled differently for floating-point operators */
if (!varTypeIsFloating(tree->TypeGet()))
{
fgMoveOpsLeft(tree);
op1 = tree->gtOp1;
op2 = tree->gtOp2;
}
}
}
#if REARRANGE_ADDS
/* Change "((x+icon)+y)" to "((x+y)+icon)"
Don't reorder floating-point operations */
if (fgGlobalMorph && (oper == GT_ADD) && !tree->gtOverflow() && (op1->gtOper == GT_ADD) && !op1->gtOverflow() &&
varTypeIsIntegralOrI(typ))
{
GenTree* ad1 = op1->AsOp()->gtOp1;
GenTree* ad2 = op1->AsOp()->gtOp2;
if (!op2->OperIsConst() && ad2->OperIsConst())
{
// This takes
// + (tree)
// / \.
// / \.
// / \.
// + (op1) op2
// / \.
// / \.
// ad1 ad2
//
// and it swaps ad2 and op2.
// Don't create a byref pointer that may point outside of the ref object.
// If a GC happens, the byref won't get updated. This can happen if one
// of the int components is negative. It also requires the address generation
// be in a fully-interruptible code region.
if (!varTypeIsGC(ad1->TypeGet()) && !varTypeIsGC(op2->TypeGet()))
{
tree->gtOp2 = ad2;
op1->AsOp()->gtOp2 = op2;
op1->gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
op2 = tree->gtOp2;
}
}
}
#endif
/*-------------------------------------------------------------------------
* Perform optional oper-specific postorder morphing
*/
switch (oper)
{
case GT_ASG:
// Make sure we're allowed to do this.
if (optValnumCSE_phase)
{
// It is not safe to reorder/delete CSE's
break;
}
if (varTypeIsStruct(typ) && !tree->IsPhiDefn())
{
if (tree->OperIsCopyBlkOp())
{
return fgMorphCopyBlock(tree);
}
else
{
return fgMorphInitBlock(tree);
}
}
if (typ == TYP_LONG)
{
break;
}
if (op2->gtFlags & GTF_ASG)
{
break;
}
if ((op2->gtFlags & GTF_CALL) && (op1->gtFlags & GTF_ALL_EFFECT))
{
break;
}
/* Special case: a cast that can be thrown away */
// TODO-Cleanup: fgMorphSmp does a similar optimization. However, it removes only
// one cast and sometimes there is another one after it that gets removed by this
// code. fgMorphSmp should be improved to remove all redundant casts so this code
// can be removed.
if (op1->gtOper == GT_IND && op2->gtOper == GT_CAST && !op2->gtOverflow())
{
var_types srct;
var_types cast;
var_types dstt;
srct = op2->AsCast()->CastOp()->TypeGet();
cast = (var_types)op2->CastToType();
dstt = op1->TypeGet();
/* Make sure these are all ints and precision is not lost */
if (genTypeSize(cast) >= genTypeSize(dstt) && dstt <= TYP_INT && srct <= TYP_INT)
{
op2 = tree->gtOp2 = op2->AsCast()->CastOp();
}
}
break;
case GT_MUL:
/* Check for the case "(val + icon) * icon" */
if (op2->gtOper == GT_CNS_INT && op1->gtOper == GT_ADD)
{
GenTree* add = op1->AsOp()->gtOp2;
if (add->IsCnsIntOrI() && (op2->GetScaleIndexMul() != 0))
{
if (tree->gtOverflow() || op1->gtOverflow())
{
break;
}
ssize_t imul = op2->AsIntCon()->gtIconVal;
ssize_t iadd = add->AsIntCon()->gtIconVal;
/* Change '(val + iadd) * imul' -> '(val * imul) + (iadd * imul)' */
oper = GT_ADD;
tree->ChangeOper(oper);
op2->AsIntCon()->SetValueTruncating(iadd * imul);
op1->ChangeOper(GT_MUL);
add->AsIntCon()->SetIconValue(imul);
}
}
break;
case GT_DIV:
/* For "val / 1", just return "val" */
if (op2->IsIntegralConst(1))
{
DEBUG_DESTROY_NODE(tree);
return op1;
}
break;
case GT_UDIV:
case GT_UMOD:
tree->CheckDivideByConstOptimized(this);
break;
case GT_LSH:
/* Check for the case "(val + icon) << icon" */
if (!optValnumCSE_phase && op2->IsCnsIntOrI() && op1->gtOper == GT_ADD && !op1->gtOverflow())
{
GenTree* cns = op1->AsOp()->gtOp2;
if (cns->IsCnsIntOrI() && (op2->GetScaleIndexShf() != 0))
{
ssize_t ishf = op2->AsIntConCommon()->IconValue();
ssize_t iadd = cns->AsIntConCommon()->IconValue();
// printf("Changing '(val+icon1)<<icon2' into '(val<<icon2+icon1<<icon2)'\n");
/* Change "(val + iadd) << ishf" into "(val<<ishf + iadd<<ishf)" */
tree->ChangeOper(GT_ADD);
// we are reusing the shift amount node here, but the type we want is that of the shift result
op2->gtType = op1->gtType;
op2->AsIntConCommon()->SetValueTruncating(iadd << ishf);
op1->ChangeOper(GT_LSH);
cns->AsIntConCommon()->SetIconValue(ishf);
}
}
break;
case GT_INIT_VAL:
// Initialization values for initBlk have special semantics - their lower
// byte is used to fill the struct. However, we allow 0 as a "bare" value,
// which enables them to get a VNForZero, and be propagated.
if (op1->IsIntegralConst(0))
{
return op1;
}
break;
default:
break;
}
return tree;
}
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphMultiOp: Morph a GenTreeMultiOp (SIMD/HWINTRINSIC) tree.
//
// Arguments:
// multiOp - The tree to morph
//
// Return Value:
// The fully morphed tree.
//
GenTree* Compiler::fgMorphMultiOp(GenTreeMultiOp* multiOp)
{
gtUpdateNodeOperSideEffects(multiOp);
bool dontCseConstArguments = false;
#if defined(FEATURE_HW_INTRINSICS)
// Opportunistically, avoid unexpected CSE for hw intrinsics with IMM arguments
if (multiOp->OperIs(GT_HWINTRINSIC))
{
NamedIntrinsic hwIntrinsic = multiOp->AsHWIntrinsic()->GetHWIntrinsicId();
#if defined(TARGET_XARCH)
if (HWIntrinsicInfo::lookupCategory(hwIntrinsic) == HW_Category_IMM)
{
dontCseConstArguments = true;
}
#elif defined(TARGET_ARMARCH)
if (HWIntrinsicInfo::HasImmediateOperand(hwIntrinsic))
{
dontCseConstArguments = true;
}
#endif
}
#endif
for (GenTree** use : multiOp->UseEdges())
{
*use = fgMorphTree(*use);
GenTree* operand = *use;
multiOp->gtFlags |= (operand->gtFlags & GTF_ALL_EFFECT);
if (dontCseConstArguments && operand->OperIsConst())
{
operand->SetDoNotCSE();
}
// Promoted structs after morph must be in one of two states:
// a) Fully eliminated from the IR (independent promotion) OR only be
// used by "special" nodes (e. g. LHS of ASGs for multi-reg structs).
// b) Marked as do-not-enregister (dependent promotion).
//
// So here we preserve this invariant and mark any promoted structs as do-not-enreg.
//
if (operand->OperIs(GT_LCL_VAR) && lvaGetDesc(operand->AsLclVar())->lvPromoted)
{
lvaSetVarDoNotEnregister(operand->AsLclVar()->GetLclNum()
DEBUGARG(DoNotEnregisterReason::SimdUserForcesDep));
}
}
#if defined(FEATURE_HW_INTRINSICS)
if (opts.OptimizationEnabled() && multiOp->OperIs(GT_HWINTRINSIC))
{
GenTreeHWIntrinsic* hw = multiOp->AsHWIntrinsic();
switch (hw->GetHWIntrinsicId())
{
#if defined(TARGET_XARCH)
case NI_SSE_Xor:
case NI_SSE2_Xor:
case NI_AVX_Xor:
case NI_AVX2_Xor:
{
// Transform XOR(X, 0) to X for vectors
GenTree* op1 = hw->Op(1);
GenTree* op2 = hw->Op(2);
if (!gtIsActiveCSE_Candidate(hw))
{
if (op1->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op1))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op1);
return op2;
}
if (op2->IsIntegralConstVector(0) && !gtIsActiveCSE_Candidate(op2))
{
DEBUG_DESTROY_NODE(hw);
DEBUG_DESTROY_NODE(op2);
return op1;
}
}
break;
}
#endif
case NI_Vector128_Create:
#if defined(TARGET_XARCH)
case NI_Vector256_Create:
#elif defined(TARGET_ARMARCH)
case NI_Vector64_Create:
#endif
{
bool hwAllArgsAreConst = true;
for (GenTree** use : multiOp->UseEdges())
{
if (!(*use)->OperIsConst())
{
hwAllArgsAreConst = false;
break;
}
}
// Avoid unexpected CSE for constant arguments for Vector_.Create
// but only if all arguments are constants.
if (hwAllArgsAreConst)
{
for (GenTree** use : multiOp->UseEdges())
{
(*use)->SetDoNotCSE();
}
}
}
break;
default:
break;
}
}
#endif // defined(FEATURE_HW_INTRINSICS)
#ifdef FEATURE_HW_INTRINSICS
if (multiOp->OperIsHWIntrinsic() && !optValnumCSE_phase)
{
return fgOptimizeHWIntrinsic(multiOp->AsHWIntrinsic());
}
#endif
return multiOp;
}
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
//------------------------------------------------------------------------
// fgMorphModToSubMulDiv: Transform a % b into the equivalent a - (a / b) * b
// (see ECMA III 3.55 and III.3.56).
//
// Arguments:
// tree - The GT_MOD/GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// For ARM64 we don't have a remainder instruction so this transform is
// always done. For XARCH this transform is done if we know that magic
// division will be used, in that case this transform allows CSE to
// eliminate the redundant div from code like "x = a / 3; y = a % 3;".
//
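//    For example, "x = a / 3; y = a % 3;" morphs the MOD into "a - (a / 3) * 3", making the
//    two divisions by 3 a CSE candidate so only one division remains after optimization.
//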
GenTree* Compiler::fgMorphModToSubMulDiv(GenTreeOp* tree)
{
JITDUMP("\nMorphing MOD/UMOD [%06u] to Sub/Mul/Div\n", dspTreeID(tree));
if (tree->OperGet() == GT_MOD)
{
tree->SetOper(GT_DIV);
}
else if (tree->OperGet() == GT_UMOD)
{
tree->SetOper(GT_UDIV);
}
else
{
noway_assert(!"Illegal gtOper in fgMorphModToSubMulDiv");
}
var_types type = tree->gtType;
GenTree* const copyOfNumeratorValue = fgMakeMultiUse(&tree->gtOp1);
GenTree* const copyOfDenominatorValue = fgMakeMultiUse(&tree->gtOp2);
GenTree* const mul = gtNewOperNode(GT_MUL, type, tree, copyOfDenominatorValue);
GenTree* const sub = gtNewOperNode(GT_SUB, type, copyOfNumeratorValue, mul);
// Ensure "sub" does not evaluate "copyOfNumeratorValue" before it is defined by "mul".
//
sub->gtFlags |= GTF_REVERSE_OPS;
#ifdef DEBUG
sub->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
tree->CheckDivideByConstOptimized(this);
return sub;
}
//------------------------------------------------------------------------
// fgMorphUModToAndSub: Transform a % b into the equivalent a & (b - 1).
// '%' must be unsigned (GT_UMOD).
// 'a' and 'b' must be integers.
// 'b' must be a constant and a power of two.
//
// Arguments:
// tree - The GT_UMOD tree to morph
//
// Returns:
// The morphed tree
//
// Notes:
// This is more optimized than calling fgMorphModToSubMulDiv.
//
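//    For example, "a % 8" with an unsigned "a" becomes "a & 7".
//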
GenTree* Compiler::fgMorphUModToAndSub(GenTreeOp* tree)
{
JITDUMP("\nMorphing UMOD [%06u] to And/Sub\n", dspTreeID(tree));
assert(tree->OperIs(GT_UMOD));
assert(tree->gtOp2->IsIntegralConstUnsignedPow2());
const var_types type = tree->TypeGet();
const size_t cnsValue = (static_cast<size_t>(tree->gtOp2->AsIntConCommon()->IntegralValue())) - 1;
GenTree* const newTree = gtNewOperNode(GT_AND, type, tree->gtOp1, gtNewIconNode(cnsValue, type));
INDEBUG(newTree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
DEBUG_DESTROY_NODE(tree->gtOp2);
DEBUG_DESTROY_NODE(tree);
return newTree;
}
//------------------------------------------------------------------------------
// fgOperIsBitwiseRotationRoot : Check if the operation can be a root of a bitwise rotation tree.
//
//
// Arguments:
// oper - Operation to check
//
// Return Value:
// True if the operation can be a root of a bitwise rotation tree; false otherwise.
bool Compiler::fgOperIsBitwiseRotationRoot(genTreeOps oper)
{
return (oper == GT_OR) || (oper == GT_XOR);
}
//------------------------------------------------------------------------------
// fgRecognizeAndMorphBitwiseRotation : Check if the tree represents a left or right rotation. If so, return
// an equivalent GT_ROL or GT_ROR tree; otherwise, return the original tree.
//
// Arguments:
// tree - tree to check for a rotation pattern
//
// Return Value:
// An equivalent GT_ROL or GT_ROR tree if a pattern is found; "nullptr" otherwise.
//
// Assumption:
// The input is a GT_OR or a GT_XOR tree.
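//
// Notes:
//    For example, with a 32-bit "x", "(x << 3) | (x >>> 29)" matches the constant-shift
//    pattern (c1 + c2 == bitsize(x)) and is morphed into "ROL(x, 3)".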
GenTree* Compiler::fgRecognizeAndMorphBitwiseRotation(GenTree* tree)
{
//
// Check for a rotation pattern, e.g.,
//
// OR ROL
// / \ / \.
// LSH RSZ -> x y
// / \ / \.
// x AND x AND
// / \ / \.
// y 31 ADD 31
// / \.
// NEG 32
// |
// y
// The patterns recognized:
// (x << (y & M)) op (x >>> ((-y + N) & M))
// (x >>> ((-y + N) & M)) op (x << (y & M))
//
// (x << y) op (x >>> (-y + N))
// (x >>> (-y + N)) op (x << y)
//
// (x >>> (y & M)) op (x << ((-y + N) & M))
// (x << ((-y + N) & M)) op (x >>> (y & M))
//
// (x >>> y) op (x << (-y + N))
// (x << (-y + N)) op (x >>> y)
//
// (x << c1) op (x >>> c2)
// (x >>> c1) op (x << c2)
//
// where
// c1 and c2 are const
// c1 + c2 == bitsize(x)
// N == bitsize(x)
// M is const
// M & (N - 1) == N - 1
// op is either | or ^
if (((tree->gtFlags & GTF_PERSISTENT_SIDE_EFFECTS) != 0) || ((tree->gtFlags & GTF_ORDER_SIDEEFF) != 0))
{
// We can't do anything if the tree has assignments, calls, or volatile
// reads. Note that we allow GTF_EXCEPT side effect since any exceptions
// thrown by the original tree will be thrown by the transformed tree as well.
return nullptr;
}
genTreeOps oper = tree->OperGet();
assert(fgOperIsBitwiseRotationRoot(oper));
// Check if we have an LSH on one side of the OR and an RSZ on the other side.
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
GenTree* leftShiftTree = nullptr;
GenTree* rightShiftTree = nullptr;
if ((op1->OperGet() == GT_LSH) && (op2->OperGet() == GT_RSZ))
{
leftShiftTree = op1;
rightShiftTree = op2;
}
else if ((op1->OperGet() == GT_RSZ) && (op2->OperGet() == GT_LSH))
{
leftShiftTree = op2;
rightShiftTree = op1;
}
else
{
return nullptr;
}
// Check if the trees representing the value to shift are identical.
// We already checked that there are no side effects above.
if (GenTree::Compare(leftShiftTree->gtGetOp1(), rightShiftTree->gtGetOp1()))
{
GenTree* rotatedValue = leftShiftTree->gtGetOp1();
var_types rotatedValueActualType = genActualType(rotatedValue->gtType);
ssize_t rotatedValueBitSize = genTypeSize(rotatedValueActualType) * 8;
noway_assert((rotatedValueBitSize == 32) || (rotatedValueBitSize == 64));
GenTree* leftShiftIndex = leftShiftTree->gtGetOp2();
GenTree* rightShiftIndex = rightShiftTree->gtGetOp2();
// The shift index may be masked. At least (rotatedValueBitSize - 1) lower bits
// shouldn't be masked for the transformation to be valid. If additional
// higher bits are not masked, the transformation is still valid since the result
// of MSIL shift instructions is unspecified if the shift amount is greater or equal
// than the width of the value being shifted.
ssize_t minimalMask = rotatedValueBitSize - 1;
ssize_t leftShiftMask = -1;
ssize_t rightShiftMask = -1;
if ((leftShiftIndex->OperGet() == GT_AND))
{
if (leftShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
leftShiftMask = leftShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
leftShiftIndex = leftShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if ((rightShiftIndex->OperGet() == GT_AND))
{
if (rightShiftIndex->gtGetOp2()->IsCnsIntOrI())
{
rightShiftMask = rightShiftIndex->gtGetOp2()->AsIntCon()->gtIconVal;
rightShiftIndex = rightShiftIndex->gtGetOp1();
}
else
{
return nullptr;
}
}
if (((minimalMask & leftShiftMask) != minimalMask) || ((minimalMask & rightShiftMask) != minimalMask))
{
// The shift index is overmasked, e.g., we have
// something like (x << (y & 15)) or
// (x >> ((32 - y) & 15)) with 32 bit x.
// The transformation is not valid.
return nullptr;
}
GenTree* shiftIndexWithAdd = nullptr;
GenTree* shiftIndexWithoutAdd = nullptr;
genTreeOps rotateOp = GT_NONE;
GenTree* rotateIndex = nullptr;
if (leftShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = leftShiftIndex;
shiftIndexWithoutAdd = rightShiftIndex;
rotateOp = GT_ROR;
}
else if (rightShiftIndex->OperGet() == GT_ADD)
{
shiftIndexWithAdd = rightShiftIndex;
shiftIndexWithoutAdd = leftShiftIndex;
rotateOp = GT_ROL;
}
if (shiftIndexWithAdd != nullptr)
{
if (shiftIndexWithAdd->gtGetOp2()->IsCnsIntOrI())
{
if (shiftIndexWithAdd->gtGetOp2()->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
if (shiftIndexWithAdd->gtGetOp1()->OperGet() == GT_NEG)
{
if (GenTree::Compare(shiftIndexWithAdd->gtGetOp1()->gtGetOp1(), shiftIndexWithoutAdd))
{
// We found one of these patterns:
// (x << (y & M)) | (x >>> ((-y + N) & M))
// (x << y) | (x >>> (-y + N))
// (x >>> (y & M)) | (x << ((-y + N) & M))
// (x >>> y) | (x << (-y + N))
// where N == bitsize(x), M is const, and
// M & (N - 1) == N - 1
CLANG_FORMAT_COMMENT_ANCHOR;
#ifndef TARGET_64BIT
if (!shiftIndexWithoutAdd->IsCnsIntOrI() && (rotatedValueBitSize == 64))
{
// TODO-X86-CQ: we need to handle variable-sized long shifts specially on x86.
// GT_LSH, GT_RSH, and GT_RSZ have helpers for this case. We may need
// to add helpers for GT_ROL and GT_ROR.
return nullptr;
}
#endif
rotateIndex = shiftIndexWithoutAdd;
}
}
}
}
}
else if ((leftShiftIndex->IsCnsIntOrI() && rightShiftIndex->IsCnsIntOrI()))
{
if (leftShiftIndex->AsIntCon()->gtIconVal + rightShiftIndex->AsIntCon()->gtIconVal == rotatedValueBitSize)
{
// We found this pattern:
// (x << c1) | (x >>> c2)
// where c1 and c2 are const and c1 + c2 == bitsize(x)
rotateOp = GT_ROL;
rotateIndex = leftShiftIndex;
}
}
if (rotateIndex != nullptr)
{
noway_assert(GenTree::OperIsRotate(rotateOp));
GenTreeFlags inputTreeEffects = tree->gtFlags & GTF_ALL_EFFECT;
// We can use the same tree only during global morph; reusing the tree in a later morph
// may invalidate value numbers.
if (fgGlobalMorph)
{
tree->AsOp()->gtOp1 = rotatedValue;
tree->AsOp()->gtOp2 = rotateIndex;
tree->ChangeOper(rotateOp);
unsigned childFlags = 0;
for (GenTree* op : tree->Operands())
{
childFlags |= (op->gtFlags & GTF_ALL_EFFECT);
}
// The parent's flags should be a superset of its operands' flags
noway_assert((inputTreeEffects & childFlags) == childFlags);
}
else
{
tree = gtNewOperNode(rotateOp, rotatedValueActualType, rotatedValue, rotateIndex);
noway_assert(inputTreeEffects == (tree->gtFlags & GTF_ALL_EFFECT));
}
return tree;
}
}
return nullptr;
}
#if !defined(TARGET_64BIT)
//------------------------------------------------------------------------------
// fgRecognizeAndMorphLongMul : Check for and morph long multiplication with 32 bit operands.
//
// Uses "GenTree::IsValidLongMul" to check for the long multiplication pattern. Will swap
// operands if the first one is a constant and the second one is not, even for trees which
// end up not being eligible for long multiplication.
//
// Arguments:
// mul - GT_MUL tree to check for a long multiplication opportunity
//
// Return Value:
// The original tree, with operands possibly swapped, if it is not eligible for long multiplication.
// Tree with GTF_MUL_64RSLT set, side effect flags propagated, and children morphed if it is.
//
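//    For example, "(long)i1 * (long)i2" with INT locals "i1" and "i2" is recognized as a
//    32x32->64 bit multiplication and marked with GTF_MUL_64RSLT.
//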
GenTreeOp* Compiler::fgRecognizeAndMorphLongMul(GenTreeOp* mul)
{
assert(mul->OperIs(GT_MUL));
assert(mul->TypeIs(TYP_LONG));
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// "IsValidLongMul" and decomposition do not handle constant op1.
if (op1->IsIntegralConst())
{
std::swap(op1, op2);
mul->gtOp1 = op1;
mul->gtOp2 = op2;
}
if (!mul->IsValidLongMul())
{
return mul;
}
// MUL_LONG needs to do the work the casts would have done.
mul->ClearUnsigned();
if (op1->IsUnsigned())
{
mul->SetUnsigned();
}
// "IsValidLongMul" returned "true", so this GT_MUL cannot overflow.
mul->ClearOverflow();
mul->Set64RsltMul();
return fgMorphLongMul(mul);
}
//------------------------------------------------------------------------------
// fgMorphLongMul : Morphs GT_MUL nodes marked with GTF_MUL_64RSLT.
//
// Morphs *only* the operands of casts that compose the long mul to
// avoid them being folded away.
//
// Arguments:
// mul - GT_MUL tree to morph operands of
//
// Return Value:
// The original tree, with operands morphed and flags propagated.
//
GenTreeOp* Compiler::fgMorphLongMul(GenTreeOp* mul)
{
INDEBUG(mul->DebugCheckLongMul());
GenTree* op1 = mul->gtGetOp1();
GenTree* op2 = mul->gtGetOp2();
// Morph the operands. We cannot allow the casts to go away, so we morph their operands directly.
op1->AsCast()->CastOp() = fgMorphTree(op1->AsCast()->CastOp());
op1->SetAllEffectsFlags(op1->AsCast()->CastOp());
if (op2->OperIs(GT_CAST))
{
op2->AsCast()->CastOp() = fgMorphTree(op2->AsCast()->CastOp());
op2->SetAllEffectsFlags(op2->AsCast()->CastOp());
}
mul->SetAllEffectsFlags(op1, op2);
op1->SetDoNotCSE();
op2->SetDoNotCSE();
return mul;
}
#endif // !defined(TARGET_64BIT)
/*****************************************************************************
*
* Transform the given tree for code generation and return an equivalent tree.
*/
GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac)
{
assert(tree);
#ifdef DEBUG
if (verbose)
{
if ((unsigned)JitConfig.JitBreakMorphTree() == tree->gtTreeID)
{
noway_assert(!"JitBreakMorphTree hit");
}
}
#endif
#ifdef DEBUG
int thisMorphNum = 0;
if (verbose && treesBeforeAfterMorph)
{
thisMorphNum = morphNum++;
printf("\nfgMorphTree (before %d):\n", thisMorphNum);
gtDispTree(tree);
}
#endif
if (fgGlobalMorph)
{
// Apply any rewrites for implicit byref arguments before morphing the
// tree.
if (fgMorphImplicitByRefArgs(tree))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (%d), after implicit-byref rewrite:\n", thisMorphNum);
gtDispTree(tree);
}
#endif
}
}
/*-------------------------------------------------------------------------
* fgMorphTree() can potentially replace a tree with another, and the
* caller has to store the return value correctly.
* Turn this on to always make copy of "tree" here to shake out
* hidden/unupdated references.
*/
#ifdef DEBUG
if (compStressCompile(STRESS_GENERIC_CHECK, 0))
{
GenTree* copy;
if (GenTree::s_gtNodeSizes[tree->gtOper] == TREE_NODE_SZ_SMALL)
{
copy = gtNewLargeOperNode(GT_ADD, TYP_INT);
}
else
{
copy = new (this, GT_CALL) GenTreeCall(TYP_INT);
}
copy->ReplaceWith(tree, this);
#if defined(LATE_DISASM)
// GT_CNS_INT is considered small, so ReplaceWith() won't copy all fields
if ((tree->gtOper == GT_CNS_INT) && tree->IsIconHandle())
{
copy->AsIntCon()->gtCompileTimeHandle = tree->AsIntCon()->gtCompileTimeHandle;
}
#endif
DEBUG_DESTROY_NODE(tree);
tree = copy;
}
#endif // DEBUG
if (fgGlobalMorph)
{
/* Ensure that we haven't morphed this node already */
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
/* Before morphing the tree, we try to propagate any active assertions */
if (optLocalAssertionProp)
{
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
GenTree* newTree = tree;
while (newTree != nullptr)
{
tree = newTree;
/* newTree is non-Null if we propagated an assertion */
newTree = optAssertionProp(apFull, tree, nullptr, nullptr);
}
assert(tree != nullptr);
}
}
PREFAST_ASSUME(tree != nullptr);
}
/* Save the original un-morphed tree for fgMorphTreeDone */
GenTree* oldTree = tree;
/* Figure out what kind of a node we have */
unsigned kind = tree->OperKind();
/* Is this a constant node? */
if (tree->OperIsConst())
{
tree = fgMorphConst(tree);
goto DONE;
}
/* Is this a leaf node? */
if (kind & GTK_LEAF)
{
tree = fgMorphLeaf(tree);
goto DONE;
}
/* Is it a 'simple' unary/binary operator? */
if (kind & GTK_SMPOP)
{
tree = fgMorphSmpOp(tree, mac);
goto DONE;
}
/* See what kind of a special operator we have here */
switch (tree->OperGet())
{
case GT_CALL:
if (tree->OperMayThrow(this))
{
tree->gtFlags |= GTF_EXCEPT;
}
else
{
tree->gtFlags &= ~GTF_EXCEPT;
}
tree = fgMorphCall(tree->AsCall());
break;
#if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
#if defined(FEATURE_SIMD)
case GT_SIMD:
#endif
#if defined(FEATURE_HW_INTRINSICS)
case GT_HWINTRINSIC:
#endif
tree = fgMorphMultiOp(tree->AsMultiOp());
break;
#endif // defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
case GT_ARR_ELEM:
tree->AsArrElem()->gtArrObj = fgMorphTree(tree->AsArrElem()->gtArrObj);
unsigned dim;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->AsArrElem()->gtArrInds[dim] = fgMorphTree(tree->AsArrElem()->gtArrInds[dim]);
}
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrElem()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
for (dim = 0; dim < tree->AsArrElem()->gtArrRank; dim++)
{
tree->gtFlags |= tree->AsArrElem()->gtArrInds[dim]->gtFlags & GTF_ALL_EFFECT;
}
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_ARR_OFFSET:
tree->AsArrOffs()->gtOffset = fgMorphTree(tree->AsArrOffs()->gtOffset);
tree->AsArrOffs()->gtIndex = fgMorphTree(tree->AsArrOffs()->gtIndex);
tree->AsArrOffs()->gtArrObj = fgMorphTree(tree->AsArrOffs()->gtArrObj);
tree->gtFlags &= ~GTF_CALL;
tree->gtFlags |= tree->AsArrOffs()->gtOffset->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtIndex->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsArrOffs()->gtArrObj->gtFlags & GTF_ALL_EFFECT;
if (fgGlobalMorph)
{
fgSetRngChkTarget(tree, false);
}
break;
case GT_PHI:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreePhi::Use& use : tree->AsPhi()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= use.GetNode()->gtFlags & GTF_ALL_EFFECT;
}
break;
case GT_FIELD_LIST:
tree->gtFlags &= ~GTF_ALL_EFFECT;
for (GenTreeFieldList::Use& use : tree->AsFieldList()->Uses())
{
use.SetNode(fgMorphTree(use.GetNode()));
tree->gtFlags |= (use.GetNode()->gtFlags & GTF_ALL_EFFECT);
}
break;
case GT_CMPXCHG:
tree->AsCmpXchg()->gtOpLocation = fgMorphTree(tree->AsCmpXchg()->gtOpLocation);
tree->AsCmpXchg()->gtOpValue = fgMorphTree(tree->AsCmpXchg()->gtOpValue);
tree->AsCmpXchg()->gtOpComparand = fgMorphTree(tree->AsCmpXchg()->gtOpComparand);
tree->gtFlags &= (~GTF_EXCEPT & ~GTF_CALL);
tree->gtFlags |= tree->AsCmpXchg()->gtOpLocation->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpValue->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsCmpXchg()->gtOpComparand->gtFlags & GTF_ALL_EFFECT;
break;
case GT_STORE_DYN_BLK:
tree = fgMorphStoreDynBlock(tree->AsStoreDynBlk());
break;
default:
#ifdef DEBUG
gtDispTree(tree);
#endif
noway_assert(!"unexpected operator");
}
DONE:
fgMorphTreeDone(tree, oldTree DEBUGARG(thisMorphNum));
return tree;
}
//------------------------------------------------------------------------
// fgKillDependentAssertionsSingle: Kill all assertions specific to lclNum
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
void Compiler::fgKillDependentAssertionsSingle(unsigned lclNum DEBUGARG(GenTree* tree))
{
/* All dependent assertions are killed here */
ASSERT_TP killed = BitVecOps::MakeCopy(apTraits, GetAssertionDep(lclNum));
if (killed)
{
AssertionIndex index = optAssertionCount;
while (killed && (index > 0))
{
if (BitVecOps::IsMember(apTraits, killed, index - 1))
{
#ifdef DEBUG
AssertionDsc* curAssertion = optGetAssertion(index);
noway_assert((curAssertion->op1.lcl.lclNum == lclNum) ||
((curAssertion->op2.kind == O2K_LCLVAR_COPY) && (curAssertion->op2.lcl.lclNum == lclNum)));
if (verbose)
{
printf("\nThe assignment ");
printTreeID(tree);
printf(" using V%02u removes: ", curAssertion->op1.lcl.lclNum);
optPrintAssertion(curAssertion);
}
#endif
// Remove this bit from the killed mask
BitVecOps::RemoveElemD(apTraits, killed, index - 1);
optAssertionRemove(index);
}
index--;
}
// killed mask should now be zero
noway_assert(BitVecOps::IsEmpty(apTraits, killed));
}
}
//------------------------------------------------------------------------
// fgKillDependentAssertions: Kill all dependent assertions with regard to lclNum.
//
// Arguments:
// lclNum - The varNum of the lclVar for which we're killing assertions.
// tree - (DEBUG only) the tree responsible for killing its assertions.
//
// Notes:
// For structs and struct fields, it will invalidate the children and parent
// respectively.
// Calls fgKillDependentAssertionsSingle to kill the assertions for a single lclVar.
//
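//    For example, an assignment to a promoted struct local kills the assertions about each
//    of its field locals and then about the struct local itself, while an assignment to a
//    struct field kills the assertions about that field and about its parent struct.
//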
void Compiler::fgKillDependentAssertions(unsigned lclNum DEBUGARG(GenTree* tree))
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvPromoted)
{
noway_assert(varTypeIsStruct(varDsc));
// Kill the field locals.
for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
{
fgKillDependentAssertionsSingle(i DEBUGARG(tree));
}
// Kill the struct local itself.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
else if (varDsc->lvIsStructField)
{
// Kill the field local.
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
// Kill the parent struct.
fgKillDependentAssertionsSingle(varDsc->lvParentLcl DEBUGARG(tree));
}
else
{
fgKillDependentAssertionsSingle(lclNum DEBUGARG(tree));
}
}
/*****************************************************************************
*
* This function is called to complete the morphing of a tree node
* It should only be called once for each node.
* If DEBUG is defined the flag GTF_DEBUG_NODE_MORPHED is checked and updated,
* to enforce the invariant that each node is only morphed once.
* If local assertion prop is enabled the result tree may be replaced
* by an equivalent tree.
*
*/
void Compiler::fgMorphTreeDone(GenTree* tree,
GenTree* oldTree /* == NULL */
DEBUGARG(int morphNum))
{
#ifdef DEBUG
if (verbose && treesBeforeAfterMorph)
{
printf("\nfgMorphTree (after %d):\n", morphNum);
gtDispTree(tree);
printf(""); // in our logic this causes a flush
}
#endif
if (!fgGlobalMorph)
{
return;
}
if ((oldTree != nullptr) && (oldTree != tree))
{
/* Ensure that we have morphed this node */
assert((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) && "ERROR: Did not morph this node!");
#ifdef DEBUG
TransferTestDataToNode(oldTree, tree);
#endif
}
else
{
// Ensure that we haven't morphed this node already
assert(((tree->gtDebugFlags & GTF_DEBUG_NODE_MORPHED) == 0) && "ERROR: Already morphed this node!");
}
if (tree->OperIsConst())
{
goto DONE;
}
if (!optLocalAssertionProp)
{
goto DONE;
}
/* Do we have any active assertions? */
if (optAssertionCount > 0)
{
/* Is this an assignment to a local variable */
GenTreeLclVarCommon* lclVarTree = nullptr;
// The check below will miss LIR-style assignments.
//
// But we shouldn't be running local assertion prop on these,
// as local prop gets disabled when we run global prop.
assert(!tree->OperIs(GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
// DefinesLocal can return true for some BLK op uses, so
// check what gets assigned only when we're at an assignment.
if (tree->OperIsSsaDef() && tree->DefinesLocal(this, &lclVarTree))
{
unsigned lclNum = lclVarTree->GetLclNum();
noway_assert(lclNum < lvaCount);
fgKillDependentAssertions(lclNum DEBUGARG(tree));
}
}
/* If this tree makes a new assertion - make it available */
optAssertionGen(tree);
DONE:;
#ifdef DEBUG
/* Mark this node as being morphed */
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
#endif
}
//------------------------------------------------------------------------
// fgFoldConditional: try to fold conditionals and optimize BBJ_COND or
// BBJ_SWITCH blocks.
//
// Arguments:
// block - block to examine
//
// Returns:
// FoldResult indicating what changes were made, if any
//
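//    For example, if the JTRUE condition of a BBJ_COND block folds to the constant 1, the
//    block becomes a BBJ_ALWAYS to its jump target, the not-taken successor loses this block
//    as a predecessor, and edge weights are adjusted when valid profile data is available.
//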
Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block)
{
FoldResult result = FoldResult::FOLD_DID_NOTHING;
// We don't want to make any code unreachable
//
if (opts.OptimizationDisabled())
{
return result;
}
if (block->bbJumpKind == BBJ_COND)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_JTRUE);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
noway_assert((block->bbNext->countOfInEdges() > 0) && (block->bbJumpDest->countOfInEdges() > 0));
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the jump entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
// block is a BBJ_COND that we are folding the conditional for.
// bTaken is the path that will always be taken from block.
// bNotTaken is the path that will never be taken from block.
//
BasicBlock* bTaken;
BasicBlock* bNotTaken;
if (cond->AsIntCon()->gtIconVal != 0)
{
/* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
bTaken = block->bbJumpDest;
bNotTaken = block->bbNext;
}
else
{
/* Unmark the loop if we are removing a backwards branch */
/* dest block must also be marked as a loop head and */
/* We must be able to reach the backedge block */
if ((block->bbJumpDest->isLoopHead()) && (block->bbJumpDest->bbNum <= block->bbNum) &&
fgReachable(block->bbJumpDest, block))
{
optUnmarkLoopBlocks(block->bbJumpDest, block);
}
/* JTRUE 0 - transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
bTaken = block->bbNext;
bNotTaken = block->bbJumpDest;
}
if (fgHaveValidEdgeWeights)
{
// We are removing an edge from block to bNotTaken
// and we have already computed the edge weights, so
// we will try to adjust some of the weights
//
flowList* edgeTaken = fgGetPredForBlock(bTaken, block);
BasicBlock* bUpdated = nullptr; // non-NULL if we updated the weight of an internal block
// We examine the taken edge (block -> bTaken)
// if block has valid profile weight and bTaken does not we try to adjust bTaken's weight
// else if bTaken has valid profile weight and block does not we try to adjust block's weight
// We can only adjust the block weights when (the edge block -> bTaken) is the only edge into bTaken
//
if (block->hasProfileWeight())
{
// The edge weights for (block -> bTaken) are 100% of block's weight
edgeTaken->setEdgeWeights(block->bbWeight, block->bbWeight, bTaken);
if (!bTaken->hasProfileWeight())
{
if ((bTaken->countOfInEdges() == 1) || (bTaken->bbWeight < block->bbWeight))
{
// Update the weight of bTaken
bTaken->inheritWeight(block);
bUpdated = bTaken;
}
}
}
else if (bTaken->hasProfileWeight())
{
if (bTaken->countOfInEdges() == 1)
{
// There is only one in edge to bTaken
edgeTaken->setEdgeWeights(bTaken->bbWeight, bTaken->bbWeight, bTaken);
// Update the weight of block
block->inheritWeight(bTaken);
bUpdated = block;
}
}
if (bUpdated != nullptr)
{
weight_t newMinWeight;
weight_t newMaxWeight;
flowList* edge;
// Now fix the weights of the edges out of 'bUpdated'
switch (bUpdated->bbJumpKind)
{
case BBJ_NONE:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
break;
case BBJ_COND:
edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbNext);
FALLTHROUGH;
case BBJ_ALWAYS:
edge = fgGetPredForBlock(bUpdated->bbJumpDest, bUpdated);
newMaxWeight = bUpdated->bbWeight;
newMinWeight = min(edge->edgeWeightMin(), newMaxWeight);
edge->setEdgeWeights(newMinWeight, newMaxWeight, bUpdated->bbJumpDest);
break;
default:
// We don't handle BBJ_SWITCH
break;
}
}
}
/* modify the flow graph */
/* Remove 'block' from the predecessor list of 'bNotTaken' */
fgRemoveRefPred(bNotTaken, block);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
/* if the block was a loop condition we may have to modify
* the loop table */
for (unsigned loopNum = 0; loopNum < optLoopCount; loopNum++)
{
/* Some loops may have been already removed by
* loop unrolling or conditional folding */
if (optLoopTable[loopNum].lpFlags & LPFLG_REMOVED)
{
continue;
}
/* We are only interested in the loop bottom */
if (optLoopTable[loopNum].lpBottom == block)
{
if (cond->AsIntCon()->gtIconVal == 0)
{
/* This was a bogus loop (condition always false)
* Remove the loop from the table */
optMarkLoopRemoved(loopNum);
optLoopTable[loopNum].lpTop->unmarkLoopAlign(this DEBUG_ARG("Bogus loop"));
#ifdef DEBUG
if (verbose)
{
printf("Removing loop " FMT_LP " (from " FMT_BB " to " FMT_BB ")\n\n", loopNum,
optLoopTable[loopNum].lpTop->bbNum, optLoopTable[loopNum].lpBottom->bbNum);
}
#endif
}
}
}
}
}
else if (block->bbJumpKind == BBJ_SWITCH)
{
noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt->GetNextStmt() == nullptr);
if (lastStmt->GetRootNode()->gtOper == GT_CALL)
{
noway_assert(fgRemoveRestOfBlock);
// Unconditional throw - transform the basic block into a BBJ_THROW
//
fgConvertBBToThrowBB(block);
result = FoldResult::FOLD_CHANGED_CONTROL_FLOW;
JITDUMP("\nConditional folded at " FMT_BB "\n", block->bbNum);
JITDUMP(FMT_BB " becomes a BBJ_THROW\n", block->bbNum);
return result;
}
noway_assert(lastStmt->GetRootNode()->gtOper == GT_SWITCH);
/* Did we fold the conditional */
noway_assert(lastStmt->GetRootNode()->AsOp()->gtOp1);
GenTree* condTree;
condTree = lastStmt->GetRootNode()->AsOp()->gtOp1;
GenTree* cond;
cond = condTree->gtEffectiveVal(true);
if (cond->OperIsConst())
{
/* Yupee - we folded the conditional!
* Remove the conditional statement */
noway_assert(cond->gtOper == GT_CNS_INT);
if (condTree != cond)
{
// Preserve any side effects
assert(condTree->OperIs(GT_COMMA));
lastStmt->SetRootNode(condTree);
result = FoldResult::FOLD_ALTERED_LAST_STMT;
}
else
{
// no side effects, remove the switch entirely
fgRemoveStmt(block, lastStmt);
result = FoldResult::FOLD_REMOVED_LAST_STMT;
}
/* modify the flow graph */
/* Find the actual jump target */
unsigned switchVal;
switchVal = (unsigned)cond->AsIntCon()->gtIconVal;
unsigned jumpCnt;
jumpCnt = block->bbJumpSwt->bbsCount;
BasicBlock** jumpTab;
jumpTab = block->bbJumpSwt->bbsDstTab;
bool foundVal;
foundVal = false;
for (unsigned val = 0; val < jumpCnt; val++, jumpTab++)
{
BasicBlock* curJump = *jumpTab;
assert(curJump->countOfInEdges() > 0);
// If val matches switchVal or we are at the last entry and
// we never found the switch value then set the new jump dest
if ((val == switchVal) || (!foundVal && (val == jumpCnt - 1)))
{
if (curJump != block->bbNext)
{
/* transform the basic block into a BBJ_ALWAYS */
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = curJump;
}
else
{
/* transform the basic block into a BBJ_NONE */
block->bbJumpKind = BBJ_NONE;
}
foundVal = true;
}
else
{
/* Remove 'block' from the predecessor list of 'curJump' */
fgRemoveRefPred(curJump, block);
}
}
assert(foundVal);
#ifdef DEBUG
if (verbose)
{
printf("\nConditional folded at " FMT_BB "\n", block->bbNum);
printf(FMT_BB " becomes a %s", block->bbNum,
block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE");
if (block->bbJumpKind == BBJ_ALWAYS)
{
printf(" to " FMT_BB, block->bbJumpDest->bbNum);
}
printf("\n");
}
#endif
}
}
return result;
}
//------------------------------------------------------------------------
// fgMorphBlockStmt: morph a single statement in a block.
//
// Arguments:
// block - block containing the statement
// stmt - statement to morph
// msg - string to identify caller in a dump
//
// Returns:
// true if 'stmt' was removed from the block.
//    false if 'stmt' is still in the block (even if other statements were removed).
//
// Notes:
// Can be called anytime, unlike fgMorphStmts() which should only be called once.
//
bool Compiler::fgMorphBlockStmt(BasicBlock* block, Statement* stmt DEBUGARG(const char* msg))
{
assert(block != nullptr);
assert(stmt != nullptr);
// Reset some ambient state
fgRemoveRestOfBlock = false;
compCurBB = block;
compCurStmt = stmt;
GenTree* morph = fgMorphTree(stmt->GetRootNode());
// Bug 1106830 - During the CSE phase we can't just remove
// morph->AsOp()->gtOp2 as it could contain CSE expressions.
// This leads to a noway_assert in OptCSE.cpp when
// searching for the removed CSE ref. (using gtFindLink)
//
if (!optValnumCSE_phase)
{
// Check for morph as a GT_COMMA with an unconditional throw
if (fgIsCommaThrow(morph, true))
{
#ifdef DEBUG
if (verbose)
{
printf("Folding a top-level fgIsCommaThrow stmt\n");
printf("Removing op2 as unreachable:\n");
gtDispTree(morph->AsOp()->gtOp2);
printf("\n");
}
#endif
// Use the call as the new stmt
morph = morph->AsOp()->gtOp1;
noway_assert(morph->gtOper == GT_CALL);
}
// we can get a throw as a statement root
if (fgIsThrow(morph))
{
#ifdef DEBUG
if (verbose)
{
printf("We have a top-level fgIsThrow stmt\n");
printf("Removing the rest of block as unreachable:\n");
}
#endif
noway_assert((morph->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
}
stmt->SetRootNode(morph);
// Can the entire tree be removed?
bool removedStmt = false;
// Defer removing statements during CSE so we don't inadvertently remove any CSE defs.
if (!optValnumCSE_phase)
{
removedStmt = fgCheckRemoveStmt(block, stmt);
}
// Or this is the last statement of a conditional branch that was just folded?
if (!removedStmt && (stmt->GetNextStmt() == nullptr) && !fgRemoveRestOfBlock)
{
FoldResult const fr = fgFoldConditional(block);
removedStmt = (fr == FoldResult::FOLD_REMOVED_LAST_STMT);
}
if (!removedStmt)
{
// Have to re-do the evaluation order since for example some later code does not expect constants as op1
gtSetStmtInfo(stmt);
// Have to re-link the nodes for this statement
fgSetStmtSeq(stmt);
}
#ifdef DEBUG
if (verbose)
{
printf("%s %s tree:\n", msg, (removedStmt ? "removed" : "morphed"));
gtDispTree(morph);
printf("\n");
}
#endif
if (fgRemoveRestOfBlock)
{
// Remove the rest of the stmts in the block
for (Statement* removeStmt : StatementList(stmt->GetNextStmt()))
{
fgRemoveStmt(block, removeStmt);
}
// The rest of block has been removed and we will always throw an exception.
//
// For compDbgCode, we prepend an empty BB as the firstBB, it is BBJ_NONE.
// We should not convert it to a ThrowBB.
if ((block != fgFirstBB) || ((fgFirstBB->bbFlags & BBF_INTERNAL) == 0))
{
// Convert block to a throw bb
fgConvertBBToThrowBB(block);
}
#ifdef DEBUG
if (verbose)
{
printf("\n%s Block " FMT_BB " becomes a throw block.\n", msg, block->bbNum);
}
#endif
fgRemoveRestOfBlock = false;
}
return removedStmt;
}
/*****************************************************************************
*
* Morph the statements of the given block.
* This function should be called just once for a block. Use fgMorphBlockStmt()
* for reentrant calls.
*/
void Compiler::fgMorphStmts(BasicBlock* block)
{
fgRemoveRestOfBlock = false;
fgCurrentlyInUseArgTemps = hashBv::Create(this);
for (Statement* const stmt : block->Statements())
{
if (fgRemoveRestOfBlock)
{
fgRemoveStmt(block, stmt);
continue;
}
#ifdef FEATURE_SIMD
if (opts.OptimizationEnabled() && stmt->GetRootNode()->TypeGet() == TYP_FLOAT &&
stmt->GetRootNode()->OperGet() == GT_ASG)
{
fgMorphCombineSIMDFieldAssignments(block, stmt);
}
#endif
fgMorphStmt = stmt;
compCurStmt = stmt;
GenTree* oldTree = stmt->GetRootNode();
#ifdef DEBUG
unsigned oldHash = verbose ? gtHashValue(oldTree) : DUMMY_INIT(~0);
if (verbose)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (before)\n", block->bbNum, stmt->GetID());
gtDispTree(oldTree);
}
#endif
/* Morph this statement tree */
GenTree* morphedTree = fgMorphTree(oldTree);
// mark any outgoing arg temps as free so we can reuse them in the next statement.
fgCurrentlyInUseArgTemps->ZeroAll();
// Has fgMorphStmt been sneakily changed ?
if ((stmt->GetRootNode() != oldTree) || (block != compCurBB))
{
if (stmt->GetRootNode() != oldTree)
{
/* This must be a tail call. Ignore 'morphedTree' and carry on with
   the tail-call node */
morphedTree = stmt->GetRootNode();
}
else
{
/* This must be a tailcall that caused a GCPoll to get
injected. We haven't actually morphed the call yet
but the flag still got set, clear it here... */
CLANG_FORMAT_COMMENT_ANCHOR;
#ifdef DEBUG
morphedTree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
#endif
}
noway_assert(compTailCallUsed);
noway_assert(morphedTree->gtOper == GT_CALL);
GenTreeCall* call = morphedTree->AsCall();
// Could be
// - a fast call made as jmp in which case block will be ending with
// BBJ_RETURN (as we need epilog) and marked as containing a jmp.
// - a tailcall dispatched via JIT helper, on x86, in which case
// block will be ending with BBJ_THROW.
// - a tail call dispatched via runtime help (IL stubs), in which
// case there will not be any tailcall and the block will be ending
// with BBJ_RETURN (as normal control flow)
noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) &&
((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) ||
(call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) ||
(!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN)));
}
#ifdef DEBUG
if (compStressCompile(STRESS_CLONE_EXPR, 30))
{
// Clone all the trees to stress gtCloneExpr()
if (verbose)
{
printf("\nfgMorphTree (stressClone from):\n");
gtDispTree(morphedTree);
}
morphedTree = gtCloneExpr(morphedTree);
noway_assert(morphedTree != nullptr);
if (verbose)
{
printf("\nfgMorphTree (stressClone to):\n");
gtDispTree(morphedTree);
}
}
/* If the hash value changes. we modified the tree during morphing */
if (verbose)
{
unsigned newHash = gtHashValue(morphedTree);
if (newHash != oldHash)
{
printf("\nfgMorphTree " FMT_BB ", " FMT_STMT " (after)\n", block->bbNum, stmt->GetID());
gtDispTree(morphedTree);
}
}
#endif
/* Check for morphedTree as a GT_COMMA with an unconditional throw */
if (!gtIsActiveCSE_Candidate(morphedTree) && fgIsCommaThrow(morphedTree, true))
{
/* Use the call as the new stmt */
morphedTree = morphedTree->AsOp()->gtOp1;
noway_assert(morphedTree->gtOper == GT_CALL);
noway_assert((morphedTree->gtFlags & GTF_COLON_COND) == 0);
fgRemoveRestOfBlock = true;
}
stmt->SetRootNode(morphedTree);
if (fgRemoveRestOfBlock)
{
continue;
}
/* Has the statement been optimized away */
if (fgCheckRemoveStmt(block, stmt))
{
continue;
}
/* Check if this block ends with a conditional branch that can be folded */
if (fgFoldConditional(block) != FoldResult::FOLD_DID_NOTHING)
{
continue;
}
if (ehBlockHasExnFlowDsc(block))
{
continue;
}
}
if (fgRemoveRestOfBlock)
{
if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH))
{
Statement* first = block->firstStmt();
noway_assert(first);
Statement* lastStmt = block->lastStmt();
noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr);
GenTree* last = lastStmt->GetRootNode();
if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) ||
((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH)))
{
GenTree* op1 = last->AsOp()->gtOp1;
if (op1->OperIsCompare())
{
/* Unmark the comparison node with GTF_RELOP_JMP_USED */
op1->gtFlags &= ~GTF_RELOP_JMP_USED;
}
lastStmt->SetRootNode(fgMorphTree(op1));
}
}
/* Mark block as a BBJ_THROW block */
fgConvertBBToThrowBB(block);
}
#if FEATURE_FASTTAILCALL
GenTree* recursiveTailCall = nullptr;
if (block->endsWithTailCallConvertibleToLoop(this, &recursiveTailCall))
{
fgMorphRecursiveFastTailCallIntoLoop(block, recursiveTailCall->AsCall());
}
#endif
// Reset this back so that it doesn't leak out impacting other blocks
fgRemoveRestOfBlock = false;
}
/*****************************************************************************
*
* Morph the blocks of the method.
* Returns true if the basic block list is modified.
* This function should be called just once.
*/
void Compiler::fgMorphBlocks()
{
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgMorphBlocks()\n");
}
#endif
/* Since fgMorphTree can be called after various optimizations to re-arrange
* the nodes we need a global flag to signal if we are during the one-pass
* global morphing */
fgGlobalMorph = true;
//
// Local assertion prop is enabled if we are optimized
//
optLocalAssertionProp = opts.OptimizationEnabled();
if (optLocalAssertionProp)
{
//
// Initialize for local assertion prop
//
optAssertionInit(true);
}
if (!compEnregLocals())
{
// Morph is checking if lvDoNotEnregister is already set for some optimizations.
// If we are running without `CLFLG_REGVAR` flag set (`compEnregLocals() == false`)
// then we already know that we won't enregister any locals and it is better to set
// this flag before we start reading it.
// The main reason why this flag is not set is that we are running in minOpts.
lvSetMinOptsDoNotEnreg();
}
/*-------------------------------------------------------------------------
* Process all basic blocks in the function
*/
BasicBlock* block = fgFirstBB;
noway_assert(block);
do
{
#ifdef DEBUG
if (verbose)
{
printf("\nMorphing " FMT_BB " of '%s'\n", block->bbNum, info.compFullName);
}
#endif
if (optLocalAssertionProp)
{
//
// Clear out any currently recorded assertion candidates
// before processing each basic block,
// also we must handle QMARK-COLON specially
//
optAssertionReset(0);
}
// Make the current basic block address available globally.
compCurBB = block;
// Process all statement trees in the basic block.
fgMorphStmts(block);
// Do we need to merge the result of this block into a single return block?
if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0))
{
if ((genReturnBB != nullptr) && (genReturnBB != block))
{
fgMergeBlockReturn(block);
}
}
block = block->bbNext;
} while (block != nullptr);
// We are done with the global morphing phase
fgGlobalMorph = false;
compCurBB = nullptr;
// Under OSR, we no longer need to specially protect the original method entry
//
if (opts.IsOSR() && (fgEntryBB != nullptr) && (fgEntryBB->bbFlags & BBF_IMPORTED))
{
JITDUMP("OSR: un-protecting original method entry " FMT_BB "\n", fgEntryBB->bbNum);
assert(fgEntryBB->bbRefs > 0);
fgEntryBB->bbRefs--;
// We don't need to remember this block anymore.
fgEntryBB = nullptr;
}
#ifdef DEBUG
if (verboseTrees)
{
fgDispBasicBlocks(true);
}
#endif
}
//------------------------------------------------------------------------
// fgMergeBlockReturn: assign the block return value (if any) into the single return temp
// and branch to the single return block.
//
// Arguments:
// block - the block to process.
//
// Notes:
// A block is not guaranteed to have a last stmt if its jump kind is BBJ_RETURN.
// For example a method returning void could have an empty block with jump kind BBJ_RETURN.
// Such blocks do materialize as part of in-lining.
//
// A block with jump kind BBJ_RETURN does not necessarily need to end with GT_RETURN.
// It could end with a tail call or rejected tail call or monitor.exit or a GT_INTRINSIC.
// For now it is safe to explicitly check whether last stmt is GT_RETURN if genReturnLocal
// is BAD_VAR_NUM.
//
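//    For example, a block ending in "GT_RETURN(x)" is retargeted to jump to "genReturnBB",
//    and "x" is assigned to "genReturnLocal" when one exists.
//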
void Compiler::fgMergeBlockReturn(BasicBlock* block)
{
assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0));
assert((genReturnBB != nullptr) && (genReturnBB != block));
// TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN.
Statement* lastStmt = block->lastStmt();
GenTree* ret = (lastStmt != nullptr) ? lastStmt->GetRootNode() : nullptr;
if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0))
{
// This return was generated during epilog merging, so leave it alone
}
else
{
// We'll jump to the genReturnBB.
CLANG_FORMAT_COMMENT_ANCHOR;
#if !defined(TARGET_X86)
if (info.compFlags & CORINFO_FLG_SYNCH)
{
fgConvertSyncReturnToLeave(block);
}
else
#endif // !TARGET_X86
{
block->bbJumpKind = BBJ_ALWAYS;
block->bbJumpDest = genReturnBB;
fgAddRefPred(genReturnBB, block);
fgReturnCount--;
}
if (genReturnLocal != BAD_VAR_NUM)
{
// replace the GT_RETURN node to be a GT_ASG that stores the return value into genReturnLocal.
// Method must be returning a value other than TYP_VOID.
noway_assert(compMethodHasRetVal());
// This block must be ending with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
noway_assert(ret != nullptr);
// GT_RETURN must have non-null operand as the method is returning the value assigned to
// genReturnLocal
noway_assert(ret->OperGet() == GT_RETURN);
noway_assert(ret->gtGetOp1() != nullptr);
Statement* pAfterStatement = lastStmt;
const DebugInfo& di = lastStmt->GetDebugInfo();
GenTree* tree = gtNewTempAssign(genReturnLocal, ret->gtGetOp1(), &pAfterStatement, di, block);
if (tree->OperIsCopyBlkOp())
{
tree = fgMorphCopyBlock(tree);
}
else if (tree->OperIsInitBlkOp())
{
tree = fgMorphInitBlock(tree);
}
if (pAfterStatement == lastStmt)
{
lastStmt->SetRootNode(tree);
}
else
{
// gtNewTempAssign inserted additional statements after lastStmt
fgRemoveStmt(block, lastStmt);
Statement* newStmt = gtNewStmt(tree, di);
fgInsertStmtAfter(block, pAfterStatement, newStmt);
lastStmt = newStmt;
}
}
else if (ret != nullptr && ret->OperGet() == GT_RETURN)
{
// This block ends with a GT_RETURN
noway_assert(lastStmt != nullptr);
noway_assert(lastStmt->GetNextStmt() == nullptr);
// Must be a void GT_RETURN with null operand; delete it as this block branches to oneReturn
// block
noway_assert(ret->TypeGet() == TYP_VOID);
noway_assert(ret->gtGetOp1() == nullptr);
fgRemoveStmt(block, lastStmt);
}
JITDUMP("\nUpdate " FMT_BB " to jump to common return block.\n", block->bbNum);
DISPBLOCK(block);
if (block->hasProfileWeight())
{
weight_t const oldWeight = genReturnBB->hasProfileWeight() ? genReturnBB->bbWeight : BB_ZERO_WEIGHT;
weight_t const newWeight = oldWeight + block->bbWeight;
JITDUMP("merging profile weight " FMT_WT " from " FMT_BB " to common return " FMT_BB "\n", block->bbWeight,
block->bbNum, genReturnBB->bbNum);
genReturnBB->setBBProfileWeight(newWeight);
DISPBLOCK(genReturnBB);
}
}
}
/*****************************************************************************
*
* Make some decisions about the kind of code to generate.
*/
void Compiler::fgSetOptions()
{
#ifdef DEBUG
/* Should we force fully interruptible code ? */
if (JitConfig.JitFullyInt() || compStressCompile(STRESS_GENERIC_VARN, 30))
{
noway_assert(!codeGen->isGCTypeFixed());
SetInterruptible(true);
}
#endif
if (opts.compDbgCode)
{
assert(!codeGen->isGCTypeFixed());
SetInterruptible(true); // debugging is easier this way ...
}
/* Assume we won't need an explicit stack frame if this is allowed */
if (compLocallocUsed)
{
codeGen->setFramePointerRequired(true);
}
#ifdef TARGET_X86
if (compTailCallUsed)
codeGen->setFramePointerRequired(true);
#endif // TARGET_X86
if (!opts.genFPopt)
{
codeGen->setFramePointerRequired(true);
}
// Assert that the EH table has been initialized by now. Note that
// compHndBBtabAllocCount never decreases; it is a high-water mark
// of table allocation. In contrast, compHndBBtabCount does shrink
// if we delete a dead EH region, and if it shrinks to zero, the
// table pointer compHndBBtab is unreliable.
assert(compHndBBtabAllocCount >= info.compXcptnsCount);
#ifdef TARGET_X86
// Note: this case, and the !X86 case below, should both use the
// !X86 path. This would require a few more changes for X86 to use
// compHndBBtabCount (the current number of EH clauses) instead of
// info.compXcptnsCount (the number of EH clauses in IL), such as
// in ehNeedsShadowSPslots(). This is because sometimes the IL has
// an EH clause that we delete as statically dead code before we
// get here, leaving no EH clauses left, and thus no requirement
// to use a frame pointer because of EH. But until all the code uses
// the same test, leave info.compXcptnsCount here.
if (info.compXcptnsCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#else // !TARGET_X86
if (compHndBBtabCount > 0)
{
codeGen->setFramePointerRequiredEH(true);
}
#endif // TARGET_X86
#ifdef UNIX_X86_ABI
if (info.compXcptnsCount > 0)
{
assert(!codeGen->isGCTypeFixed());
// Enforce fully interruptible codegen for funclet unwinding
SetInterruptible(true);
}
#endif // UNIX_X86_ABI
if (compMethodRequiresPInvokeFrame())
{
codeGen->setFramePointerRequired(true); // Setup of Pinvoke frame currently requires an EBP style frame
}
if (info.compPublishStubParam)
{
codeGen->setFramePointerRequiredGCInfo(true);
}
if (compIsProfilerHookNeeded())
{
codeGen->setFramePointerRequired(true);
}
if (info.compIsVarArgs)
{
// Code that initializes lvaVarargsBaseOfStkArgs requires this to be EBP relative.
codeGen->setFramePointerRequiredGCInfo(true);
}
if (lvaReportParamTypeArg())
{
codeGen->setFramePointerRequiredGCInfo(true);
}
// printf("method will %s be fully interruptible\n", GetInterruptible() ? " " : "not");
}
/*****************************************************************************/
GenTree* Compiler::fgInitThisClass()
{
noway_assert(!compIsForInlining());
CORINFO_LOOKUP_KIND kind;
info.compCompHnd->getLocationOfThisType(info.compMethodHnd, &kind);
if (!kind.needsRuntimeLookup)
{
return fgGetSharedCCtor(info.compClassHnd);
}
else
{
#ifdef FEATURE_READYTORUN
// Only CoreRT understands CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE. Don't do this on CoreCLR.
if (opts.IsReadyToRun() && IsTargetAbi(CORINFO_CORERT_ABI))
{
CORINFO_RESOLVED_TOKEN resolvedToken;
memset(&resolvedToken, 0, sizeof(resolvedToken));
// We are in a shared method body, but maybe we don't need a runtime lookup after all.
// This covers the case of a generic method on a non-generic type.
if (!(info.compClassAttr & CORINFO_FLG_SHAREDINST))
{
resolvedToken.hClass = info.compClassHnd;
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
}
// We need a runtime lookup.
GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
// CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE with a zeroed out resolvedToken means "get the static
// base of the class that owns the method being compiled". If we're in this method, it means we're not
// inlining and there's no ambiguity.
return impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, TYP_BYREF,
gtNewCallArgs(ctxTree), &kind);
}
#endif
// Collectible types require that for shared generic code, if we use the generic context parameter,
// we report it. (This is a conservative approach; we could detect some cases, particularly when the
// context parameter is 'this', where we don't need the eager reporting logic.)
lvaGenericsContextInUse = true;
switch (kind.runtimeLookupKind)
{
case CORINFO_LOOKUP_THISOBJ:
{
// This code takes a this pointer; but we need to pass the static method desc to get the right point in
// the hierarchy
GenTree* vtTree = gtNewLclvNode(info.compThisArg, TYP_REF);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
// Vtable pointer of this object
vtTree = gtNewMethodTableLookup(vtTree);
GenTree* methodHnd = gtNewIconEmbMethHndNode(info.compMethodHnd);
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID, gtNewCallArgs(vtTree, methodHnd));
}
case CORINFO_LOOKUP_CLASSPARAM:
{
GenTree* vtTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
vtTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewCallArgs(vtTree));
}
case CORINFO_LOOKUP_METHODPARAM:
{
GenTree* methHndTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL);
methHndTree->gtFlags |= GTF_VAR_CONTEXT;
return gtNewHelperCallNode(CORINFO_HELP_INITINSTCLASS, TYP_VOID,
gtNewCallArgs(gtNewIconNode(0), methHndTree));
}
default:
noway_assert(!"Unknown LOOKUP_KIND");
UNREACHABLE();
}
}
}
#ifdef DEBUG
/*****************************************************************************
*
* Tree walk callback to make sure no GT_QMARK nodes are present in the tree,
* except for the allowed ? 1 : 0; pattern.
*/
Compiler::fgWalkResult Compiler::fgAssertNoQmark(GenTree** tree, fgWalkData* data)
{
if ((*tree)->OperGet() == GT_QMARK)
{
fgCheckQmarkAllowedForm(*tree);
}
return WALK_CONTINUE;
}
void Compiler::fgCheckQmarkAllowedForm(GenTree* tree)
{
assert(tree->OperGet() == GT_QMARK);
assert(!"Qmarks beyond morph disallowed.");
}
/*****************************************************************************
*
* Verify that the importer has created GT_QMARK nodes in a way we can
* process them. The following is allowed:
*
* 1. A top level qmark. Top level qmark is of the form:
* a) (bool) ? (void) : (void) OR
* b) V0N = (bool) ? (type) : (type)
*
* 2. Recursion is allowed at the top level, i.e., a GT_QMARK can be a child
* of either op1 of colon or op2 of colon but not a child of any other
* operator.
*/
void Compiler::fgPreExpandQmarkChecks(GenTree* expr)
{
GenTree* topQmark = fgGetTopLevelQmark(expr);
// If the top level Qmark is null, then scan the tree to make sure
// there are no qmarks within it.
if (topQmark == nullptr)
{
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
else
{
// We could probably expand the cond node also, but don't think the extra effort is necessary,
// so let's just assert the cond node of a top level qmark doesn't have further top level qmarks.
fgWalkTreePre(&topQmark->AsOp()->gtOp1, Compiler::fgAssertNoQmark, nullptr);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp1);
fgPreExpandQmarkChecks(topQmark->AsOp()->gtOp2->AsOp()->gtOp2);
}
}
#endif // DEBUG
/*****************************************************************************
*
* Get the top level GT_QMARK node in a given "expr", return NULL if such a
* node is not present. If the top level GT_QMARK node is assigned to a
* GT_LCL_VAR, then return the lcl node in ppDst.
*
*/
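// For example (tree shapes only, not exact dumps):
//   QMARK(cond, COLON(then, else))              - returns the QMARK; *ppDst is left null
//   ASG(LCL_VAR V05, QMARK(cond, COLON(t, e)))  - returns the QMARK; *ppDst is set to the LCL_VAR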
GenTree* Compiler::fgGetTopLevelQmark(GenTree* expr, GenTree** ppDst /* = NULL */)
{
if (ppDst != nullptr)
{
*ppDst = nullptr;
}
GenTree* topQmark = nullptr;
if (expr->gtOper == GT_QMARK)
{
topQmark = expr;
}
else if (expr->gtOper == GT_ASG && expr->AsOp()->gtOp2->gtOper == GT_QMARK &&
expr->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
topQmark = expr->AsOp()->gtOp2;
if (ppDst != nullptr)
{
*ppDst = expr->AsOp()->gtOp1;
}
}
return topQmark;
}
/*********************************************************************************
*
* For a castclass helper call,
* Importer creates the following tree:
* tmp = (op1 == null) ? op1 : ((*op1 == (cse = op2, cse)) ? op1 : helper());
*
* This method splits the qmark expression created by the importer into the
* following blocks: (block, asg, cond1, cond2, helper, remainder)
* Notice that op1 is the result for both the conditions. So we coalesce these
* assignments into a single block instead of two blocks, resulting in a nested diamond.
*
* +---------->-----------+
* | | |
* ^ ^ v
* | | |
* block-->asg-->cond1--+-->cond2--+-->helper--+-->remainder
*
* We expect to achieve the following codegen:
* mov rsi, rdx tmp = op1 // asgBlock
* test rsi, rsi goto skip if tmp == null ? // cond1Block
* je SKIP
* mov rcx, 0x76543210 cns = op2 // cond2Block
* cmp qword ptr [rsi], rcx goto skip if *tmp == op2
* je SKIP
* call CORINFO_HELP_CHKCASTCLASS_SPECIAL tmp = helper(cns, tmp) // helperBlock
* mov rsi, rax
* SKIP: // remainderBlock
* tmp has the result.
*
*/
void Compiler::fgExpandQmarkForCastInstOf(BasicBlock* block, Statement* stmt)
{
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
GenTree* expr = stmt->GetRootNode();
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
noway_assert(dst != nullptr);
assert(qmark->gtFlags & GTF_QMARK_CAST_INSTOF);
// Get cond, true, false exprs for the qmark.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
// Get cond, true, false exprs for the nested qmark.
GenTree* nestedQmark = falseExpr;
GenTree* cond2Expr;
GenTree* true2Expr;
GenTree* false2Expr;
if (nestedQmark->gtOper == GT_QMARK)
{
cond2Expr = nestedQmark->gtGetOp1();
true2Expr = nestedQmark->gtGetOp2()->AsColon()->ThenNode();
false2Expr = nestedQmark->gtGetOp2()->AsColon()->ElseNode();
}
else
{
// This is a rare case that arises when we are doing minopts and encounter isinst of null:
// gtFoldExpr was still able to optimize away part of the tree (but not all of it).
// That means it does not match our pattern.
// Rather than write code to handle this case, just fake up some nodes to make it match the common
// case. Synthesize a comparison that is always true, and for the result-on-true, use the
// entire subtree we expected to be the nested question op.
cond2Expr = gtNewOperNode(GT_EQ, TYP_INT, gtNewIconNode(0, TYP_I_IMPL), gtNewIconNode(0, TYP_I_IMPL));
true2Expr = nestedQmark;
false2Expr = gtNewIconNode(0, TYP_I_IMPL);
}
assert(false2Expr->OperGet() == trueExpr->OperGet());
// Create the chain of blocks. See method header comment.
// The order of blocks after this is the following:
// block ... asgBlock ... cond1Block ... cond2Block ... helperBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* helperBlock = fgNewBBafter(BBJ_NONE, block, true);
BasicBlock* cond2Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* cond1Block = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* asgBlock = fgNewBBafter(BBJ_NONE, block, true);
remainderBlock->bbFlags |= propagateFlags;
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
helperBlock->bbFlags &= ~BBF_INTERNAL;
cond2Block->bbFlags &= ~BBF_INTERNAL;
cond1Block->bbFlags &= ~BBF_INTERNAL;
asgBlock->bbFlags &= ~BBF_INTERNAL;
helperBlock->bbFlags |= BBF_IMPORTED;
cond2Block->bbFlags |= BBF_IMPORTED;
cond1Block->bbFlags |= BBF_IMPORTED;
asgBlock->bbFlags |= BBF_IMPORTED;
}
// Chain the flow correctly.
fgAddRefPred(asgBlock, block);
fgAddRefPred(cond1Block, asgBlock);
fgAddRefPred(cond2Block, cond1Block);
fgAddRefPred(helperBlock, cond2Block);
fgAddRefPred(remainderBlock, helperBlock);
fgAddRefPred(remainderBlock, cond1Block);
fgAddRefPred(remainderBlock, cond2Block);
cond1Block->bbJumpDest = remainderBlock;
cond2Block->bbJumpDest = remainderBlock;
// Set the weights; some are guesses.
asgBlock->inheritWeight(block);
cond1Block->inheritWeight(block);
cond2Block->inheritWeightPercentage(cond1Block, 50);
helperBlock->inheritWeightPercentage(cond2Block, 50);
// Append cond1 as JTRUE to cond1Block
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, condExpr);
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond1Block, jmpStmt);
// Append cond2 as JTRUE to cond2Block
jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, cond2Expr);
jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(cond2Block, jmpStmt);
// AsgBlock should get tmp = op1 assignment.
trueExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), trueExpr);
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(asgBlock, trueStmt);
// Since we are adding helper in the JTRUE false path, reverse the cond2 and add the helper.
gtReverseCond(cond2Expr);
GenTree* helperExpr = gtNewTempAssign(dst->AsLclVarCommon()->GetLclNum(), true2Expr);
Statement* helperStmt = fgNewStmtFromTree(helperExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(helperBlock, helperStmt);
// Finally remove the nested qmark stmt.
fgRemoveStmt(block, stmt);
if (true2Expr->OperIs(GT_CALL) && (true2Expr->AsCall()->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN))
{
fgConvertBBToThrowBB(helperBlock);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding CastInstOf qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand a statement with a top level qmark node. There are three cases, based
* on whether the qmark has both "true" and "false" arms, or just one of them.
*
* S0;
* C ? T : F;
* S1;
*
* Generates ===>
*
* bbj_always
* +---->------+
* false | |
* S0 -->-- ~C -->-- T F -->-- S1
* | |
* +--->--------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? T : NOP;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- ~C -->-- T -->-- S1
* | |
* +-->-------------+
* bbj_cond(true)
*
* -----------------------------------------
*
* S0;
* C ? NOP : F;
* S1;
*
* Generates ===>
*
* false
* S0 -->-- C -->-- F -->-- S1
* | |
* +-->------------+
* bbj_cond(true)
*
* If the qmark assigns to a variable, then create tmps for "then"
* and "else" results and assign the temp to the variable as a writeback step.
*/
void Compiler::fgExpandQmarkStmt(BasicBlock* block, Statement* stmt)
{
GenTree* expr = stmt->GetRootNode();
// Retrieve the Qmark node to be expanded.
GenTree* dst = nullptr;
GenTree* qmark = fgGetTopLevelQmark(expr, &dst);
if (qmark == nullptr)
{
return;
}
if (qmark->gtFlags & GTF_QMARK_CAST_INSTOF)
{
fgExpandQmarkForCastInstOf(block, stmt);
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (before)\n", block->bbNum);
fgDispBasicBlocks(block, block, true);
}
#endif // DEBUG
// Retrieve the operands.
GenTree* condExpr = qmark->gtGetOp1();
GenTree* trueExpr = qmark->gtGetOp2()->AsColon()->ThenNode();
GenTree* falseExpr = qmark->gtGetOp2()->AsColon()->ElseNode();
assert(!varTypeIsFloating(condExpr->TypeGet()));
bool hasTrueExpr = (trueExpr->OperGet() != GT_NOP);
bool hasFalseExpr = (falseExpr->OperGet() != GT_NOP);
assert(hasTrueExpr || hasFalseExpr); // We expect to have at least one arm of the qmark!
// Create remainder, cond and "else" blocks. After this, the blocks are in this order:
// block ... condBlock ... elseBlock ... remainderBlock
//
// We need to remember flags that exist on 'block' that we want to propagate to 'remainderBlock',
// if they are going to be cleared by fgSplitBlockAfterStatement(). We currently do this only
// for the GC safe point bit, the logic being that if 'block' was marked gcsafe, then surely
// remainderBlock will still be GC safe.
BasicBlockFlags propagateFlags = block->bbFlags & BBF_GC_SAFE_POINT;
BasicBlock* remainderBlock = fgSplitBlockAfterStatement(block, stmt);
fgRemoveRefPred(remainderBlock, block); // We're going to put more blocks between block and remainderBlock.
BasicBlock* condBlock = fgNewBBafter(BBJ_COND, block, true);
BasicBlock* elseBlock = fgNewBBafter(BBJ_NONE, condBlock, true);
// These blocks are only internal if 'block' is (but they've been set as internal by fgNewBBafter).
// If they're not internal, mark them as imported to avoid asserts about un-imported blocks.
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
condBlock->bbFlags &= ~BBF_INTERNAL;
elseBlock->bbFlags &= ~BBF_INTERNAL;
condBlock->bbFlags |= BBF_IMPORTED;
elseBlock->bbFlags |= BBF_IMPORTED;
}
remainderBlock->bbFlags |= propagateFlags;
condBlock->inheritWeight(block);
fgAddRefPred(condBlock, block);
fgAddRefPred(elseBlock, condBlock);
fgAddRefPred(remainderBlock, elseBlock);
BasicBlock* thenBlock = nullptr;
if (hasTrueExpr && hasFalseExpr)
{
// bbj_always
// +---->------+
// false | |
// S0 -->-- ~C -->-- T F -->-- S1
// | |
// +--->--------+
// bbj_cond(true)
//
gtReverseCond(condExpr);
condBlock->bbJumpDest = elseBlock;
thenBlock = fgNewBBafter(BBJ_ALWAYS, condBlock, true);
thenBlock->bbJumpDest = remainderBlock;
if ((block->bbFlags & BBF_INTERNAL) == 0)
{
thenBlock->bbFlags &= ~BBF_INTERNAL;
thenBlock->bbFlags |= BBF_IMPORTED;
}
fgAddRefPred(thenBlock, condBlock);
fgAddRefPred(remainderBlock, thenBlock);
thenBlock->inheritWeightPercentage(condBlock, 50);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasTrueExpr)
{
// false
// S0 -->-- ~C -->-- T -->-- S1
// | |
// +-->-------------+
// bbj_cond(true)
//
gtReverseCond(condExpr);
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
// Since we have no false expr, use the one we'd already created.
thenBlock = elseBlock;
elseBlock = nullptr;
thenBlock->inheritWeightPercentage(condBlock, 50);
}
else if (hasFalseExpr)
{
// false
// S0 -->-- C -->-- F -->-- S1
// | |
// +-->------------+
// bbj_cond(true)
//
condBlock->bbJumpDest = remainderBlock;
fgAddRefPred(remainderBlock, condBlock);
elseBlock->inheritWeightPercentage(condBlock, 50);
}
GenTree* jmpTree = gtNewOperNode(GT_JTRUE, TYP_VOID, qmark->gtGetOp1());
Statement* jmpStmt = fgNewStmtFromTree(jmpTree, stmt->GetDebugInfo());
fgInsertStmtAtEnd(condBlock, jmpStmt);
// Remove the original qmark statement.
fgRemoveStmt(block, stmt);
// Since we have a top level qmark, we either have a dst for it, in which case
// we need to create tmps for trueExpr and falseExpr, or else we just don't bother
// assigning.
unsigned lclNum = BAD_VAR_NUM;
if (dst != nullptr)
{
assert(dst->gtOper == GT_LCL_VAR);
lclNum = dst->AsLclVar()->GetLclNum();
}
else
{
assert(qmark->TypeGet() == TYP_VOID);
}
if (hasTrueExpr)
{
if (dst != nullptr)
{
trueExpr = gtNewTempAssign(lclNum, trueExpr);
}
Statement* trueStmt = fgNewStmtFromTree(trueExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(thenBlock, trueStmt);
}
// Assign the falseExpr into the dst or tmp, insert in elseBlock
if (hasFalseExpr)
{
if (dst != nullptr)
{
falseExpr = gtNewTempAssign(lclNum, falseExpr);
}
Statement* falseStmt = fgNewStmtFromTree(falseExpr, stmt->GetDebugInfo());
fgInsertStmtAtEnd(elseBlock, falseStmt);
}
#ifdef DEBUG
if (verbose)
{
printf("\nExpanding top-level qmark in " FMT_BB " (after)\n", block->bbNum);
fgDispBasicBlocks(block, remainderBlock, true);
}
#endif // DEBUG
}
/*****************************************************************************
*
* Expand GT_QMARK nodes from the flow graph into basic blocks.
*
*/
void Compiler::fgExpandQmarkNodes()
{
if (compQmarkUsed)
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
#ifdef DEBUG
fgPreExpandQmarkChecks(expr);
#endif
fgExpandQmarkStmt(block, stmt);
}
}
#ifdef DEBUG
fgPostExpandQmarkChecks();
#endif
}
compQmarkRationalized = true;
}
#ifdef DEBUG
/*****************************************************************************
*
* Make sure we don't have any more GT_QMARK nodes.
*
*/
void Compiler::fgPostExpandQmarkChecks()
{
for (BasicBlock* const block : Blocks())
{
for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
}
}
#endif
/*****************************************************************************
*
* Promoting struct locals
*/
void Compiler::fgPromoteStructs()
{
#ifdef DEBUG
if (verbose)
{
printf("*************** In fgPromoteStructs()\n");
}
#endif // DEBUG
if (!opts.OptEnabled(CLFLG_STRUCTPROMOTE))
{
JITDUMP(" promotion opt flag not enabled\n");
return;
}
if (fgNoStructPromotion)
{
JITDUMP(" promotion disabled by JitNoStructPromotion\n");
return;
}
#if 0
// The code in this #if has been useful in debugging struct promotion issues, by
// allowing the struct promotion optimization to be selectively enabled according to
// method hash.
#ifdef DEBUG
unsigned methHash = info.compMethodHash();
char* lostr = getenv("structpromohashlo");
unsigned methHashLo = 0;
if (lostr != NULL)
{
sscanf_s(lostr, "%x", &methHashLo);
}
char* histr = getenv("structpromohashhi");
unsigned methHashHi = UINT32_MAX;
if (histr != NULL)
{
sscanf_s(histr, "%x", &methHashHi);
}
if (methHash < methHashLo || methHash > methHashHi)
{
return;
}
else
{
printf("Promoting structs for method %s, hash = 0x%x.\n",
info.compFullName, info.compMethodHash());
printf(""); // in our logic this causes a flush
}
#endif // DEBUG
#endif // 0
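// For example (illustrative; only applies when the DEBUG-only block above is enabled),
// promotion can be restricted to a single method by bounding both sides with its hash:
// set structpromohashlo=2c3a and structpromohashhi=2c3a (values are parsed as hex via %x).
// The hash "2c3a" here is just a placeholder for the method hash printed by the JIT.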
if (info.compIsVarArgs)
{
JITDUMP(" promotion disabled because of varargs\n");
return;
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable before fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
// The lvaTable might grow as we grab temps. Make a local copy here.
unsigned startLvaCount = lvaCount;
//
// Loop through the original lvaTable. Looking for struct locals to be promoted.
//
lvaStructPromotionInfo structPromotionInfo;
bool tooManyLocalsReported = false;
// Clear the structPromotionHelper, since it is used during inlining, at which point it
// may be conservative about looking up SIMD info.
// We don't want to preserve those conservative decisions for the actual struct promotion.
structPromotionHelper->Clear();
for (unsigned lclNum = 0; lclNum < startLvaCount; lclNum++)
{
// Whether this var got promoted
bool promotedVar = false;
LclVarDsc* varDsc = lvaGetDesc(lclNum);
// If we have marked this as lvUsedInSIMDIntrinsic, then we do not want to promote
// its fields. Instead, we will attempt to enregister the entire struct.
if (varDsc->lvIsSIMDType() && (varDsc->lvIsUsedInSIMDIntrinsic() || isOpaqueSIMDLclVar(varDsc)))
{
varDsc->lvRegStruct = true;
}
// Don't promote if we have reached the tracking limit.
else if (lvaHaveManyLocals())
{
// Print the message first time when we detected this condition
if (!tooManyLocalsReported)
{
JITDUMP("Stopped promoting struct fields, due to too many locals.\n");
}
tooManyLocalsReported = true;
}
else if (varTypeIsStruct(varDsc))
{
assert(structPromotionHelper != nullptr);
promotedVar = structPromotionHelper->TryPromoteStructVar(lclNum);
}
if (!promotedVar && varDsc->lvIsSIMDType() && !varDsc->lvFieldAccessed)
{
// Even if we have not used this in a SIMD intrinsic, if it is not being promoted,
// we will treat it as a reg struct.
varDsc->lvRegStruct = true;
}
}
#ifdef DEBUG
if (verbose)
{
printf("\nlvaTable after fgPromoteStructs\n");
lvaTableDump();
}
#endif // DEBUG
}
void Compiler::fgMorphStructField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_FIELD);
GenTreeField* field = tree->AsField();
GenTree* objRef = field->GetFldObj();
GenTree* obj = ((objRef != nullptr) && (objRef->gtOper == GT_ADDR)) ? objRef->AsOp()->gtOp1 : nullptr;
noway_assert((tree->gtFlags & GTF_GLOB_REF) || ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR)));
/* Is this an instance data member? */
if ((obj != nullptr) && (obj->gtOper == GT_LCL_VAR))
{
unsigned lclNum = obj->AsLclVarCommon()->GetLclNum();
const LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(obj))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = field->gtFldOffset;
unsigned fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
if (fieldLclIndex == BAD_VAR_NUM)
{
// Access a promoted struct's field with an offset that doesn't correspond to any field.
// It can happen if the struct was cast to another struct with different offsets.
return;
}
const LclVarDsc* fieldDsc = lvaGetDesc(fieldLclIndex);
var_types fieldType = fieldDsc->TypeGet();
assert(fieldType != TYP_STRUCT); // promoted LCL_VAR can't have a struct type.
if (tree->TypeGet() != fieldType)
{
if (tree->TypeGet() != TYP_STRUCT)
{
// This is going to be an incorrect instruction promotion.
// For example when we try to read int as long.
return;
}
if (field->gtFldHnd != fieldDsc->lvFieldHnd)
{
CORINFO_CLASS_HANDLE fieldTreeClass = nullptr, fieldDscClass = nullptr;
CorInfoType fieldTreeType = info.compCompHnd->getFieldType(field->gtFldHnd, &fieldTreeClass);
CorInfoType fieldDscType = info.compCompHnd->getFieldType(fieldDsc->lvFieldHnd, &fieldDscClass);
if (fieldTreeType != fieldDscType || fieldTreeClass != fieldDscClass)
{
// Access the promoted field with a different class handle, can't check that types match.
return;
}
// Access the promoted field as a field of a non-promoted struct with the same class handle.
}
else
{
// As we already checked this above, we must have a tree with a TYP_STRUCT type
//
assert(tree->TypeGet() == TYP_STRUCT);
// The field tree accesses it as a struct, but the promoted LCL_VAR field
// says that it has another type. This happens when struct promotion unwraps
// a single field struct to get to its ultimate type.
//
// Note that currently, we cannot have a promoted LCL_VAR field with a struct type.
//
// This mismatch in types can lead to problems for some parent node type like GT_RETURN.
// So we check the parent node and only allow this optimization when we have
// a GT_ADDR or a GT_ASG.
//
// Note that for a GT_ASG we have to do some additional work,
// see below after the SetOper(GT_LCL_VAR)
//
if (!parent->OperIs(GT_ADDR, GT_ASG))
{
// Don't transform other operations such as GT_RETURN
//
return;
}
#ifdef DEBUG
// This is an additional DEBUG-only sanity check
//
assert(structPromotionHelper != nullptr);
structPromotionHelper->CheckRetypedAsScalar(field->gtFldHnd, fieldType);
#endif // DEBUG
}
}
tree->SetOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(fieldLclIndex);
tree->gtType = fieldType;
tree->gtFlags &= GTF_NODE_MASK; // Note: that clears all flags except `GTF_COLON_COND`.
if (parent->gtOper == GT_ASG)
{
// If we are changing the left side of an assignment, we need to set
// these two flags:
//
if (parent->AsOp()->gtOp1 == tree)
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
// Promotion of struct containing struct fields where the field
// is a struct with a single pointer sized scalar type field: in
// this case struct promotion uses the type of the underlying
// scalar field as the type of struct field instead of recursively
// promoting. This can lead to a case where we have a block-asgn
// with its RHS replaced with a scalar type. Mark RHS value as
// DONT_CSE so that assertion prop will not do const propagation.
// The reason this is required is that if RHS of a block-asg is a
// constant, then it is interpreted as init-block incorrectly.
//
// TODO - This can also be avoided if we implement recursive struct
// promotion, tracked by #10019.
if (varTypeIsStruct(parent) && parent->AsOp()->gtOp2 == tree && !varTypeIsStruct(tree))
{
tree->gtFlags |= GTF_DONT_CSE;
}
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in promoted struct with local var V%02u\n", fieldLclIndex);
}
#endif // DEBUG
}
}
else
{
// Normed struct
// A "normed struct" is a struct that the VM tells us is a basic type. This can only happen if
// the struct contains a single element, and that element is 4 bytes (on x64 it can also be 8
// bytes). Normally, the type of the local var and the type of GT_FIELD are equivalent. However,
// there is one extremely rare case where that won't be true. An enum type is a special value type
// that contains exactly one element of a primitive integer type (that, for CLS programs is named
// "value__"). The VM tells us that a local var of that enum type is the primitive type of the
// enum's single field. It turns out that it is legal for IL to access this field using ldflda or
// ldfld. For example:
//
// .class public auto ansi sealed mynamespace.e_t extends [mscorlib]System.Enum
// {
// .field public specialname rtspecialname int16 value__
// .field public static literal valuetype mynamespace.e_t one = int16(0x0000)
// }
// .method public hidebysig static void Main() cil managed
// {
// .locals init (valuetype mynamespace.e_t V_0)
// ...
// ldloca.s V_0
// ldflda int16 mynamespace.e_t::value__
// ...
// }
//
// Normally, compilers will not generate the ldflda, since it is superfluous.
//
// In the example, the lclVar is short, but the JIT promotes all trees using this local to the
// "actual type", that is, INT. But the GT_FIELD is still SHORT. So, in the case of a type
// mismatch like this, don't do this morphing. The local var may end up getting marked as
// address taken, and the appropriate SHORT load will be done from memory in that case.
if (tree->TypeGet() == obj->TypeGet())
{
tree->ChangeOper(GT_LCL_VAR);
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree->gtFlags &= GTF_NODE_MASK;
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing the field in normed struct with local var V%02u\n", lclNum);
}
#endif // DEBUG
}
}
}
}
void Compiler::fgMorphLocalField(GenTree* tree, GenTree* parent)
{
noway_assert(tree->OperGet() == GT_LCL_FLD);
unsigned lclNum = tree->AsLclFld()->GetLclNum();
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varTypeIsStruct(varDsc))
{
if (varDsc->lvPromoted)
{
// Promoted struct
unsigned fldOffset = tree->AsLclFld()->GetLclOffs();
unsigned fieldLclIndex = 0;
LclVarDsc* fldVarDsc = nullptr;
if (fldOffset != BAD_VAR_NUM)
{
fieldLclIndex = lvaGetFieldLocal(varDsc, fldOffset);
noway_assert(fieldLclIndex != BAD_VAR_NUM);
fldVarDsc = lvaGetDesc(fieldLclIndex);
}
var_types treeType = tree->TypeGet();
var_types fieldType = fldVarDsc->TypeGet();
if (fldOffset != BAD_VAR_NUM &&
((genTypeSize(fieldType) == genTypeSize(treeType)) || (varDsc->lvFieldCnt == 1)))
{
// There is an existing sub-field we can use.
tree->AsLclFld()->SetLclNum(fieldLclIndex);
// The field must be an enregisterable type; otherwise it would not be a promoted field.
// The tree type may not match, e.g. for return types that have been morphed, but both
// must be enregisterable types.
assert(varTypeIsEnregisterable(treeType) && varTypeIsEnregisterable(fieldType));
tree->ChangeOper(GT_LCL_VAR);
assert(tree->AsLclVarCommon()->GetLclNum() == fieldLclIndex);
tree->gtType = fldVarDsc->TypeGet();
if ((parent->gtOper == GT_ASG) && (parent->AsOp()->gtOp1 == tree))
{
tree->gtFlags |= GTF_VAR_DEF;
tree->gtFlags |= GTF_DONT_CSE;
}
JITDUMP("Replacing the GT_LCL_FLD in promoted struct with local var V%02u\n", fieldLclIndex);
}
else
{
// There is no existing field that has all the parts that we need
// So we must ensure that the struct lives in memory.
lvaSetVarDoNotEnregister(lclNum DEBUGARG(DoNotEnregisterReason::LocalField));
#ifdef DEBUG
// We can't convert this local to a float because it really does have its
// address taken.
varDsc->lvKeepType = 1;
#endif // DEBUG
}
}
else if (varTypeIsSIMD(varDsc) && (genTypeSize(tree->TypeGet()) == genTypeSize(varDsc)))
{
assert(tree->AsLclFld()->GetLclOffs() == 0);
tree->gtType = varDsc->TypeGet();
tree->ChangeOper(GT_LCL_VAR);
JITDUMP("Replacing GT_LCL_FLD of struct with local var V%02u\n", lclNum);
}
}
}
//------------------------------------------------------------------------
// fgResetImplicitByRefRefCount: Clear the ref count field of all implicit byrefs
void Compiler::fgResetImplicitByRefRefCount()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgResetImplicitByRefRefCount()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; ++lclNum)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (varDsc->lvIsImplicitByRef)
{
// Clear the ref count field; fgMarkAddressTakenLocals will increment it per
// appearance of implicit-by-ref param so that call arg morphing can do an
// optimization for single-use implicit-by-ref params whose single use is as
// an outgoing call argument.
varDsc->setLvRefCnt(0, RCS_EARLY);
varDsc->setLvRefCntWtd(0, RCS_EARLY);
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
//------------------------------------------------------------------------
// fgRetypeImplicitByRefArgs: Update the types on implicit byref parameters' `LclVarDsc`s (from
// struct to pointer). Also choose (based on address-exposed analysis)
// which struct promotions of implicit byrefs to keep or discard.
// For those which are kept, insert the appropriate initialization code.
// For those which are to be discarded, annotate the promoted field locals
// so that fgMorphImplicitByRefArgs will know to rewrite their appearances
// using indirections off the pointer parameters.
void Compiler::fgRetypeImplicitByRefArgs()
{
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
#ifdef DEBUG
if (verbose)
{
printf("\n*************** In fgRetypeImplicitByRefArgs()\n");
}
#endif // DEBUG
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
unsigned size;
if (varDsc->lvSize() > REGSIZE_BYTES)
{
size = varDsc->lvSize();
}
else
{
CORINFO_CLASS_HANDLE typeHnd = varDsc->GetStructHnd();
size = info.compCompHnd->getClassSize(typeHnd);
}
if (varDsc->lvPromoted)
{
// This implicit-by-ref was promoted; create a new temp to represent the
// promoted struct before rewriting this parameter as a pointer.
unsigned newLclNum = lvaGrabTemp(false DEBUGARG("Promoted implicit byref"));
lvaSetStruct(newLclNum, lvaGetStruct(lclNum), true);
if (info.compIsVarArgs)
{
lvaSetStructUsedAsVarArg(newLclNum);
}
// Update varDsc since lvaGrabTemp might have re-allocated the var dsc array.
varDsc = lvaGetDesc(lclNum);
// Copy the struct promotion annotations to the new temp.
LclVarDsc* newVarDsc = lvaGetDesc(newLclNum);
newVarDsc->lvPromoted = true;
newVarDsc->lvFieldLclStart = varDsc->lvFieldLclStart;
newVarDsc->lvFieldCnt = varDsc->lvFieldCnt;
newVarDsc->lvContainsHoles = varDsc->lvContainsHoles;
newVarDsc->lvCustomLayout = varDsc->lvCustomLayout;
#ifdef DEBUG
newVarDsc->lvKeepType = true;
#endif // DEBUG
// Propagate address-taken-ness and do-not-enregister-ness.
newVarDsc->SetAddressExposed(varDsc->IsAddressExposed() DEBUGARG(varDsc->GetAddrExposedReason()));
newVarDsc->lvDoNotEnregister = varDsc->lvDoNotEnregister;
newVarDsc->lvLiveInOutOfHndlr = varDsc->lvLiveInOutOfHndlr;
newVarDsc->lvSingleDef = varDsc->lvSingleDef;
newVarDsc->lvSingleDefRegCandidate = varDsc->lvSingleDefRegCandidate;
newVarDsc->lvSpillAtSingleDef = varDsc->lvSpillAtSingleDef;
#ifdef DEBUG
newVarDsc->SetDoNotEnregReason(varDsc->GetDoNotEnregReason());
#endif // DEBUG
// If the promotion is dependent, the promoted temp would just be committed
// to memory anyway, so we'll rewrite its appearances to be indirections
// through the pointer parameter, the same as we'd do for this
// parameter if it weren't promoted at all (otherwise the initialization
// of the new temp would just be a needless memcpy at method entry).
//
// Otherwise, see how many appearances there are. We keep two early ref counts: total
// number of references to the struct or some field, and how many of these are
// arguments to calls. We undo promotion unless we see enough non-call uses.
//
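// For example (illustrative numbers): totalAppearances = 6 and callAppearances = 2 give
// nonCallAppearances = 4; with lvFieldCnt = 3 and an independent promotion we keep it,
// while nonCallAppearances <= lvFieldCnt (or a dependent promotion) undoes it.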
const unsigned totalAppearances = varDsc->lvRefCnt(RCS_EARLY);
const unsigned callAppearances = (unsigned)varDsc->lvRefCntWtd(RCS_EARLY);
assert(totalAppearances >= callAppearances);
const unsigned nonCallAppearances = totalAppearances - callAppearances;
bool undoPromotion = ((lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ||
(nonCallAppearances <= varDsc->lvFieldCnt));
#ifdef DEBUG
// Above is a profitability heuristic; either value of
// undoPromotion should lead to correct code. So,
// under stress, make different decisions at times.
if (compStressCompile(STRESS_BYREF_PROMOTION, 25))
{
undoPromotion = !undoPromotion;
JITDUMP("Stress -- changing byref undo promotion for V%02u to %s undo\n", lclNum,
undoPromotion ? "" : "NOT");
}
#endif // DEBUG
JITDUMP("%s promotion of implicit by-ref V%02u: %s total: %u non-call: %u fields: %u\n",
undoPromotion ? "Undoing" : "Keeping", lclNum,
(lvaGetPromotionType(newVarDsc) == PROMOTION_TYPE_DEPENDENT) ? "dependent;" : "",
totalAppearances, nonCallAppearances, varDsc->lvFieldCnt);
if (!undoPromotion)
{
// Insert IR that initializes the temp from the parameter.
// LHS is a simple reference to the temp.
fgEnsureFirstBBisScratch();
GenTree* lhs = gtNewLclvNode(newLclNum, varDsc->lvType);
// RHS is an indirection (using GT_OBJ) off the parameter.
GenTree* addr = gtNewLclvNode(lclNum, TYP_BYREF);
GenTree* rhs = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(size));
GenTree* assign = gtNewAssignNode(lhs, rhs);
fgNewStmtAtBeg(fgFirstBB, assign);
}
// Update the locals corresponding to the promoted fields.
unsigned fieldLclStart = varDsc->lvFieldLclStart;
unsigned fieldCount = varDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
if (undoPromotion)
{
// Leave lvParentLcl pointing to the parameter so that fgMorphImplicitByRefArgs
// will know to rewrite appearances of this local.
assert(fieldVarDsc->lvParentLcl == lclNum);
}
else
{
// Set the new parent.
fieldVarDsc->lvParentLcl = newLclNum;
}
fieldVarDsc->lvIsParam = false;
// The fields shouldn't inherit any register preferences from
// the parameter which is really a pointer to the struct.
fieldVarDsc->lvIsRegArg = false;
fieldVarDsc->lvIsMultiRegArg = false;
fieldVarDsc->SetArgReg(REG_NA);
#if FEATURE_MULTIREG_ARGS
fieldVarDsc->SetOtherArgReg(REG_NA);
#endif
// Promoted fields of implicit byrefs can't be OSR locals.
//
if (fieldVarDsc->lvIsOSRLocal)
{
assert(opts.IsOSR());
fieldVarDsc->lvIsOSRLocal = false;
}
}
// Hijack lvFieldLclStart to record the new temp number.
// It will get fixed up in fgMarkDemotedImplicitByRefArgs.
varDsc->lvFieldLclStart = newLclNum;
// Go ahead and clear lvFieldCnt -- either we're promoting
// a replacement temp or we're not promoting this arg, and
// in either case the parameter is now a pointer that doesn't
// have these fields.
varDsc->lvFieldCnt = 0;
// Hijack lvPromoted to communicate to fgMorphImplicitByRefArgs
// whether references to the struct should be rewritten as
// indirections off the pointer (not promoted) or references
// to the new struct local (promoted).
varDsc->lvPromoted = !undoPromotion;
}
else
{
// The "undo promotion" path above clears lvPromoted for args that struct
// promotion wanted to promote but that aren't considered profitable to
// rewrite. It hijacks lvFieldLclStart to communicate to
// fgMarkDemotedImplicitByRefArgs that it needs to clean up annotations left
// on such args for fgMorphImplicitByRefArgs to consult in the interim.
// Here we have an arg that was simply never promoted, so make sure it doesn't
// have nonzero lvFieldLclStart, since that would confuse fgMorphImplicitByRefArgs
// and fgMarkDemotedImplicitByRefArgs.
assert(varDsc->lvFieldLclStart == 0);
}
// Since the parameter in this position is really a pointer, its type is TYP_BYREF.
varDsc->lvType = TYP_BYREF;
// Since this previously was a TYP_STRUCT and we have changed it to a TYP_BYREF
// make sure that the following flag is not set as these will force SSA to
// exclude tracking/enregistering these LclVars. (see SsaBuilder::IncludeInSsa)
//
varDsc->lvOverlappingFields = 0; // This flag could have been set, clear it.
// The struct parameter may have had its address taken, but the pointer parameter
// cannot -- any uses of the struct parameter's address are uses of the pointer
// parameter's value, and there's no way for the MSIL to reference the pointer
// parameter's address. So clear the address-taken bit for the parameter.
varDsc->CleanAddressExposed();
varDsc->lvDoNotEnregister = 0;
#ifdef DEBUG
// This should not be converted to a double in stress mode,
// because it is really a pointer
varDsc->lvKeepType = 1;
if (verbose)
{
printf("Changing the lvType for struct parameter V%02d to TYP_BYREF.\n", lclNum);
}
#endif // DEBUG
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
//------------------------------------------------------------------------
// fgMarkDemotedImplicitByRefArgs: Clear annotations for any implicit byrefs that struct promotion
// asked to promote. Appearances of these have now been rewritten
// (by fgMorphImplicitByRefArgs) using indirections from the pointer
// parameter or references to the promotion temp, as appropriate.
void Compiler::fgMarkDemotedImplicitByRefArgs()
{
JITDUMP("\n*************** In fgMarkDemotedImplicitByRefArgs()\n");
#if (defined(TARGET_AMD64) && !defined(UNIX_AMD64_ABI)) || defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64)
for (unsigned lclNum = 0; lclNum < info.compArgsCount; lclNum++)
{
LclVarDsc* varDsc = lvaGetDesc(lclNum);
if (lvaIsImplicitByRefLocal(lclNum))
{
JITDUMP("Clearing annotation for V%02d\n", lclNum);
if (varDsc->lvPromoted)
{
// The parameter is simply a pointer now, so clear lvPromoted. It was left set
// by fgRetypeImplicitByRefArgs to communicate to fgMorphImplicitByRefArgs that
// appearances of this arg needed to be rewritten to a new promoted struct local.
varDsc->lvPromoted = false;
// Clear the lvFieldLclStart value that was set by fgRetypeImplicitByRefArgs
// to tell fgMorphImplicitByRefArgs which local is the new promoted struct one.
varDsc->lvFieldLclStart = 0;
}
else if (varDsc->lvFieldLclStart != 0)
{
// We created new temps to represent a promoted struct corresponding to this
// parameter, but decided not to go through with the promotion and have
// rewritten all uses as indirections off the pointer parameter.
// We stashed the pointer to the new struct temp in lvFieldLclStart; make
// note of that and clear the annotation.
unsigned structLclNum = varDsc->lvFieldLclStart;
varDsc->lvFieldLclStart = 0;
// The temp struct is now unused; set flags appropriately so that we
// won't allocate space for it on the stack.
LclVarDsc* structVarDsc = lvaGetDesc(structLclNum);
structVarDsc->CleanAddressExposed();
#ifdef DEBUG
structVarDsc->lvUnusedStruct = true;
structVarDsc->lvUndoneStructPromotion = true;
#endif // DEBUG
unsigned fieldLclStart = structVarDsc->lvFieldLclStart;
unsigned fieldCount = structVarDsc->lvFieldCnt;
unsigned fieldLclStop = fieldLclStart + fieldCount;
for (unsigned fieldLclNum = fieldLclStart; fieldLclNum < fieldLclStop; ++fieldLclNum)
{
JITDUMP("Fixing pointer for field V%02d from V%02d to V%02d\n", fieldLclNum, lclNum, structLclNum);
// Fix the pointer to the parent local.
LclVarDsc* fieldVarDsc = lvaGetDesc(fieldLclNum);
assert(fieldVarDsc->lvParentLcl == lclNum);
fieldVarDsc->lvParentLcl = structLclNum;
// The field local is now unused; set flags appropriately so that
// we won't allocate stack space for it.
fieldVarDsc->CleanAddressExposed();
}
}
}
}
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
/*****************************************************************************
*
* Morph irregular parameters:
* for x64 and ARM64 this means turning them into byrefs, adding extra indirs.
*/
bool Compiler::fgMorphImplicitByRefArgs(GenTree* tree)
{
#if (!defined(TARGET_AMD64) || defined(UNIX_AMD64_ABI)) && !defined(TARGET_ARM64) && !defined(TARGET_LOONGARCH64)
return false;
#else // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
bool changed = false;
// Implicit byref morphing needs to know if the reference to the parameter is a
// child of GT_ADDR or not, so this method looks one level down and does the
// rewrite whenever a child is a reference to an implicit byref parameter.
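// For example (illustrative): with V01 an implicit byref struct parameter,
//   ADDR(LCL_VAR V01) is rewritten to the bare V01 (now TYP_BYREF), while
//   LCL_VAR V01 appearing under any other parent becomes OBJ(V01) (or a FIELD off the
//   pointer for a dependently promoted field).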
if (tree->gtOper == GT_ADDR)
{
if (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)
{
GenTree* morphedTree = fgMorphImplicitByRefArgs(tree, true);
changed = (morphedTree != nullptr);
assert(!changed || (morphedTree == tree));
}
}
else
{
for (GenTree** pTree : tree->UseEdges())
{
GenTree** pTreeCopy = pTree;
GenTree* childTree = *pTree;
if (childTree->gtOper == GT_LCL_VAR)
{
GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false);
if (newChildTree != nullptr)
{
changed = true;
*pTreeCopy = newChildTree;
}
}
}
}
return changed;
#endif // (TARGET_AMD64 && !UNIX_AMD64_ABI) || TARGET_ARM64 || TARGET_LOONGARCH64
}
GenTree* Compiler::fgMorphImplicitByRefArgs(GenTree* tree, bool isAddr)
{
assert((tree->gtOper == GT_LCL_VAR) || ((tree->gtOper == GT_ADDR) && (tree->AsOp()->gtOp1->gtOper == GT_LCL_VAR)));
assert(isAddr == (tree->gtOper == GT_ADDR));
GenTree* lclVarTree = isAddr ? tree->AsOp()->gtOp1 : tree;
unsigned lclNum = lclVarTree->AsLclVarCommon()->GetLclNum();
LclVarDsc* lclVarDsc = lvaGetDesc(lclNum);
CORINFO_FIELD_HANDLE fieldHnd;
unsigned fieldOffset = 0;
var_types fieldRefType = TYP_UNKNOWN;
if (lvaIsImplicitByRefLocal(lclNum))
{
// The SIMD transformation to coalesce contiguous references to SIMD vector fields will
// re-invoke the traversal to mark address-taken locals.
// So, we may encounter a tree that has already been transformed to TYP_BYREF.
// If we do, leave it as-is.
if (!varTypeIsStruct(lclVarTree))
{
assert(lclVarTree->TypeGet() == TYP_BYREF);
return nullptr;
}
else if (lclVarDsc->lvPromoted)
{
// fgRetypeImplicitByRefArgs created a new promoted struct local to represent this
// arg. Rewrite this to refer to the new local.
assert(lclVarDsc->lvFieldLclStart != 0);
lclVarTree->AsLclVarCommon()->SetLclNum(lclVarDsc->lvFieldLclStart);
return tree;
}
fieldHnd = nullptr;
}
else if (lclVarDsc->lvIsStructField && lvaIsImplicitByRefLocal(lclVarDsc->lvParentLcl))
{
// This was a field reference to an implicit-by-reference struct parameter that was
// dependently promoted; update it to a field reference off the pointer.
// Grab the field handle from the struct field lclVar.
fieldHnd = lclVarDsc->lvFieldHnd;
fieldOffset = lclVarDsc->lvFldOffset;
assert(fieldHnd != nullptr);
// Update lclNum/lclVarDsc to refer to the parameter
lclNum = lclVarDsc->lvParentLcl;
lclVarDsc = lvaGetDesc(lclNum);
fieldRefType = lclVarTree->TypeGet();
}
else
{
// We only need to transform the 'marked' implicit by ref parameters
return nullptr;
}
// This is no longer a def of the lclVar, even if it WAS a def of the struct.
lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK);
if (isAddr)
{
if (fieldHnd == nullptr)
{
// change &X into just plain X
tree->ReplaceWith(lclVarTree, this);
tree->gtType = TYP_BYREF;
}
else
{
// change &(X.f) [i.e. GT_ADDR of local for promoted arg field]
// into &(X, f) [i.e. GT_ADDR of GT_FIELD off ptr param]
lclVarTree->AsLclVarCommon()->SetLclNum(lclNum);
lclVarTree->gtType = TYP_BYREF;
tree->AsOp()->gtOp1 = gtNewFieldRef(fieldRefType, fieldHnd, lclVarTree, fieldOffset);
}
#ifdef DEBUG
if (verbose)
{
printf("Replacing address of implicit by ref struct parameter with byref:\n");
}
#endif // DEBUG
}
else
{
// Change X into OBJ(X) or FIELD(X, f)
var_types structType = tree->gtType;
tree->gtType = TYP_BYREF;
if (fieldHnd)
{
tree->AsLclVarCommon()->SetLclNum(lclNum);
tree = gtNewFieldRef(fieldRefType, fieldHnd, tree, fieldOffset);
}
else
{
tree = gtNewObjNode(lclVarDsc->GetStructHnd(), tree);
if (structType == TYP_STRUCT)
{
gtSetObjGcInfo(tree->AsObj());
}
}
// TODO-CQ: If the VM ever stops violating the ABI and passing heap references
// we could remove TGTANYWHERE
tree->gtFlags = ((tree->gtFlags & GTF_COMMON_MASK) | GTF_IND_TGTANYWHERE);
#ifdef DEBUG
if (verbose)
{
printf("Replacing value of implicit by ref struct parameter with indir of parameter:\n");
}
#endif // DEBUG
}
#ifdef DEBUG
if (verbose)
{
gtDispTree(tree);
}
#endif // DEBUG
return tree;
}
//------------------------------------------------------------------------
// fgAddFieldSeqForZeroOffset:
// Associate a fieldSeq (with a zero offset) with the GenTree node 'addr'
//
// Arguments:
// addr - A GenTree node
// fieldSeqZero - a fieldSeq (with a zero offset)
//
// Notes:
// Some GenTree nodes have internal fields that record the field sequence.
// If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD
// we can append the field sequence using the gtFieldSeq
// If we have a GT_ADD of a GT_CNS_INT we can use the
// fieldSeq from child node.
// Otherwise we record 'fieldSeqZero' in the GenTree node using
// a Map: GetFieldSeqStore()
// When doing so we take care to preserve any existing zero field sequence
//
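// For example (illustrative): for ADDR(LCL_FLD V03) the sequence is appended to the
// LCL_FLD's own field sequence; for ADD(addr, CNS_INT 0) it is appended to the constant's
// field sequence; any other address shape falls back to the zero-offset field map.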
void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero)
{
// We expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
// Tunnel through any commas.
const bool commaOnly = true;
addr = addr->gtEffectiveVal(commaOnly);
// We still expect 'addr' to be an address at this point.
assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
FieldSeqNode* fieldSeqUpdate = fieldSeqZero;
GenTree* fieldSeqNode = addr;
bool fieldSeqRecorded = false;
#ifdef DEBUG
if (verbose)
{
printf("\nfgAddFieldSeqForZeroOffset for");
gtDispAnyFieldSeq(fieldSeqZero);
printf("\naddr (Before)\n");
gtDispNode(addr, nullptr, nullptr, false);
gtDispCommonEndLine(addr);
}
#endif // DEBUG
switch (addr->OperGet())
{
case GT_CNS_INT:
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
break;
case GT_ADDR:
if (addr->AsOp()->gtOp1->OperGet() == GT_LCL_FLD)
{
fieldSeqNode = addr->AsOp()->gtOp1;
GenTreeLclFld* lclFld = addr->AsOp()->gtOp1->AsLclFld();
fieldSeqUpdate = GetFieldSeqStore()->Append(lclFld->GetFieldSeq(), fieldSeqZero);
lclFld->SetFieldSeq(fieldSeqUpdate);
fieldSeqRecorded = true;
}
break;
case GT_ADD:
if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp1;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT)
{
fieldSeqNode = addr->AsOp()->gtOp2;
fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero);
addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
fieldSeqRecorded = true;
}
break;
default:
break;
}
if (fieldSeqRecorded == false)
{
// Record in the general zero-offset map.
// The "addr" node might already be annotated with a zero-offset field sequence.
FieldSeqNode* existingFieldSeq = nullptr;
if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq))
{
// Append the zero field sequences
fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero);
}
// Overwrite the field sequence annotation for op1
GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite);
fieldSeqRecorded = true;
}
#ifdef DEBUG
if (verbose)
{
printf(" (After)\n");
gtDispNode(fieldSeqNode, nullptr, nullptr, false);
gtDispCommonEndLine(fieldSeqNode);
}
#endif // DEBUG
}
#ifdef FEATURE_SIMD
//-----------------------------------------------------------------------------------
// fgMorphCombineSIMDFieldAssignments:
// If the RHS of the input stmt is a read of the SIMD vector X field, then this function
// will keep reading the next few stmts based on the vector size (2, 3, 4).
// If the next stmts' LHS locations are contiguous and the RHS locations are also
// contiguous, then we replace those statements with a copyblk.
//
// Argument:
// block - BasicBlock*. block which stmt belongs to
// stmt - Statement*. the stmt node we want to check
//
// return value:
// if this function successfully optimized the stmts, then return true. Otherwise
// return false;
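// For example (illustrative): four contiguous float stores coming in as separate statements,
//   a.x = v.x; a.y = v.y; a.z = v.z; a.w = v.w;
// are collapsed into a single SIMD-typed assignment that copies 'v' into 'a' in one go.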
bool Compiler::fgMorphCombineSIMDFieldAssignments(BasicBlock* block, Statement* stmt)
{
GenTree* tree = stmt->GetRootNode();
assert(tree->OperGet() == GT_ASG);
GenTree* originalLHS = tree->AsOp()->gtOp1;
GenTree* prevLHS = tree->AsOp()->gtOp1;
GenTree* prevRHS = tree->AsOp()->gtOp2;
unsigned index = 0;
CorInfoType simdBaseJitType = CORINFO_TYPE_UNDEF;
unsigned simdSize = 0;
GenTree* simdStructNode = getSIMDStructFromField(prevRHS, &simdBaseJitType, &index, &simdSize, true);
if (simdStructNode == nullptr || index != 0 || simdBaseJitType != CORINFO_TYPE_FLOAT)
{
// if the RHS is not from a SIMD vector field X, then there is no need to check further.
return false;
}
var_types simdBaseType = JitType2PreciseVarType(simdBaseJitType);
var_types simdType = getSIMDTypeForSize(simdSize);
int assignmentsCount = simdSize / genTypeSize(simdBaseType) - 1;
int remainingAssignments = assignmentsCount;
Statement* curStmt = stmt->GetNextStmt();
Statement* lastStmt = stmt;
while (curStmt != nullptr && remainingAssignments > 0)
{
GenTree* exp = curStmt->GetRootNode();
if (exp->OperGet() != GT_ASG)
{
break;
}
GenTree* curLHS = exp->gtGetOp1();
GenTree* curRHS = exp->gtGetOp2();
if (!areArgumentsContiguous(prevLHS, curLHS) || !areArgumentsContiguous(prevRHS, curRHS))
{
break;
}
remainingAssignments--;
prevLHS = curLHS;
prevRHS = curRHS;
lastStmt = curStmt;
curStmt = curStmt->GetNextStmt();
}
if (remainingAssignments > 0)
{
// If the number of remaining assignments is greater than zero, then the
// assignments are not writing to contiguous memory locations from the
// same vector.
return false;
}
#ifdef DEBUG
if (verbose)
{
printf("\nFound contiguous assignments from a SIMD vector to memory.\n");
printf("From " FMT_BB ", stmt ", block->bbNum);
printStmtID(stmt);
printf(" to stmt");
printStmtID(lastStmt);
printf("\n");
}
#endif
for (int i = 0; i < assignmentsCount; i++)
{
fgRemoveStmt(block, stmt->GetNextStmt());
}
GenTree* dstNode;
if (originalLHS->OperIs(GT_LCL_FLD))
{
dstNode = originalLHS;
dstNode->gtType = simdType;
dstNode->AsLclFld()->SetFieldSeq(FieldSeqStore::NotAField());
// This may have changed a partial local field into full local field
if (dstNode->IsPartialLclFld(this))
{
dstNode->gtFlags |= GTF_VAR_USEASG;
}
else
{
dstNode->gtFlags &= ~GTF_VAR_USEASG;
}
}
else
{
GenTree* copyBlkDst = createAddressNodeForSIMDInit(originalLHS, simdSize);
if (simdStructNode->OperIsLocal())
{
setLclRelatedToSIMDIntrinsic(simdStructNode);
}
GenTree* copyBlkAddr = copyBlkDst;
if (copyBlkAddr->gtOper == GT_LEA)
{
copyBlkAddr = copyBlkAddr->AsAddrMode()->Base();
}
GenTreeLclVarCommon* localDst = copyBlkAddr->IsLocalAddrExpr();
if (localDst != nullptr)
{
setLclRelatedToSIMDIntrinsic(localDst);
}
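// If the SIMD source is an implicit-byref parameter, it appears here as a
// TYP_BYREF local; read the vector value through an indirection rather than
// using the local directly.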
if (simdStructNode->TypeGet() == TYP_BYREF)
{
assert(simdStructNode->OperIsLocal());
assert(lvaIsImplicitByRefLocal(simdStructNode->AsLclVarCommon()->GetLclNum()));
simdStructNode = gtNewIndir(simdType, simdStructNode);
}
else
{
assert(varTypeIsSIMD(simdStructNode));
}
dstNode = gtNewOperNode(GT_IND, simdType, copyBlkDst);
}
#ifdef DEBUG
if (verbose)
{
printf("\n" FMT_BB " stmt ", block->bbNum);
printStmtID(stmt);
printf("(before)\n");
gtDispStmt(stmt);
}
#endif
assert(!simdStructNode->CanCSE());
simdStructNode->ClearDoNotCSE();
tree = gtNewAssignNode(dstNode, simdStructNode);
stmt->SetRootNode(tree);
// Since we generated a new address node which didn't exist before,
// we should expose this address manually here.
// TODO-ADDR: Remove this when LocalAddressVisitor transforms all
// local field access into LCL_FLDs, at that point we would be
// combining 2 existing LCL_FLDs or 2 FIELDs that do not reference
// a local and thus cannot result in a new address exposed local.
fgMarkAddressExposedLocals(stmt);
#ifdef DEBUG
if (verbose)
{
printf("\nReplaced " FMT_BB " stmt", block->bbNum);
printStmtID(stmt);
printf("(after)\n");
gtDispStmt(stmt);
}
#endif
return true;
}
#endif // FEATURE_SIMD
//------------------------------------------------------------------------
// fgCheckStmtAfterTailCall: check that statements after the tail call stmt
// candidate are in one of the expected forms, which are described below.
//
// Return Value:
// 'true' if stmts are in the expected form, else 'false'.
//
bool Compiler::fgCheckStmtAfterTailCall()
{
// For void calls, we would have created a GT_CALL in the stmt list.
// For non-void calls, we would have created a GT_RETURN(GT_CAST(GT_CALL)).
// For calls returning structs, we would have a void call, followed by a void return.
// For debuggable code, it would be an assignment of the call to a temp
// We want to get rid of any of these extra trees, and just leave
// the call.
Statement* callStmt = fgMorphStmt;
Statement* nextMorphStmt = callStmt->GetNextStmt();
// Check that the remaining stmts in the block are in one of the following patterns:
// 1) ret(void)
// 2) ret(cast*(callResultLclVar))
// 3) lclVar = callResultLclVar, the actual ret(lclVar) in another block
// 4) nop
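//
// For example (illustrative), for "return (short)Callee();" the stmt list may be:
//   tmp = CALL Callee()        ; fgMorphStmt, the tail call candidate
//   RETURN(CAST<short>(tmp))   ; the following stmt, matching pattern 2)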
if (nextMorphStmt != nullptr)
{
GenTree* callExpr = callStmt->GetRootNode();
if (callExpr->gtOper != GT_ASG)
{
// The next stmt can be GT_RETURN(TYP_VOID) or GT_RETURN(lclVar),
// where lclVar was the return buffer in the call for structs or simd.
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = retStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
nextMorphStmt = retStmt->GetNextStmt();
}
else
{
noway_assert(callExpr->gtGetOp1()->OperIsLocal());
unsigned callResultLclNumber = callExpr->gtGetOp1()->AsLclVarCommon()->GetLclNum();
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// We can have a chain of assignments from the call result to
// various inline return spill temps. These are ok as long
// as the last one ultimately provides the return value or is ignored.
//
// And if we're returning a small type we may see a cast
// on the source side.
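//
// For example (illustrative):
//   tmp1 = CALL ...   ; the tail call candidate
//   tmp2 = tmp1       ; inline return spill temp copy
//   RETURN(tmp2)      ; possibly in another block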
while ((nextMorphStmt != nullptr) && (nextMorphStmt->GetRootNode()->OperIs(GT_ASG, GT_NOP)))
{
if (nextMorphStmt->GetRootNode()->OperIs(GT_NOP))
{
nextMorphStmt = nextMorphStmt->GetNextStmt();
continue;
}
Statement* moveStmt = nextMorphStmt;
GenTree* moveExpr = nextMorphStmt->GetRootNode();
GenTree* moveDest = moveExpr->gtGetOp1();
noway_assert(moveDest->OperIsLocal());
// Tunnel through any casts on the source side.
GenTree* moveSource = moveExpr->gtGetOp2();
while (moveSource->OperIs(GT_CAST))
{
noway_assert(!moveSource->gtOverflow());
moveSource = moveSource->gtGetOp1();
}
noway_assert(moveSource->OperIsLocal());
// Verify we're just passing the value from one local to another
// along the chain.
const unsigned srcLclNum = moveSource->AsLclVarCommon()->GetLclNum();
noway_assert(srcLclNum == callResultLclNumber);
const unsigned dstLclNum = moveDest->AsLclVarCommon()->GetLclNum();
callResultLclNumber = dstLclNum;
nextMorphStmt = moveStmt->GetNextStmt();
}
if (nextMorphStmt != nullptr)
#endif
{
Statement* retStmt = nextMorphStmt;
GenTree* retExpr = nextMorphStmt->GetRootNode();
noway_assert(retExpr->gtOper == GT_RETURN);
GenTree* treeWithLcl = retExpr->gtGetOp1();
while (treeWithLcl->gtOper == GT_CAST)
{
noway_assert(!treeWithLcl->gtOverflow());
treeWithLcl = treeWithLcl->gtGetOp1();
}
noway_assert(callResultLclNumber == treeWithLcl->AsLclVarCommon()->GetLclNum());
nextMorphStmt = retStmt->GetNextStmt();
}
}
}
return nextMorphStmt == nullptr;
}
//------------------------------------------------------------------------
// fgCanTailCallViaJitHelper: check whether we can use the faster tailcall
// JIT helper on x86.
//
// Return Value:
// 'true' if we can; or 'false' if we should use the generic tailcall mechanism.
//
bool Compiler::fgCanTailCallViaJitHelper()
{
#if !defined(TARGET_X86) || defined(UNIX_X86_ABI) || defined(FEATURE_READYTORUN)
// On anything except Windows x86 we have no faster mechanism available.
return false;
#else
// The JIT helper does not properly handle the case where localloc was used.
if (compLocallocUsed)
return false;
return true;
#endif
}
//------------------------------------------------------------------------
// fgMorphReduceAddOps: reduce successive variable adds into a single multiply,
// e.g., i + i + i + i => i * 4.
//
// Arguments:
// tree - tree for reduction
//
// Return Value:
// reduced tree if pattern matches, original tree otherwise
//
GenTree* Compiler::fgMorphReduceAddOps(GenTree* tree)
{
// ADD(_, V0) starts the pattern match.
if (!tree->OperIs(GT_ADD) || tree->gtOverflow())
{
return tree;
}
#ifndef TARGET_64BIT
// Transforming 64-bit ADD to 64-bit MUL on 32-bit system results in replacing
// ADD ops with a helper function call. Don't apply optimization in that case.
if (tree->TypeGet() == TYP_LONG)
{
return tree;
}
#endif
GenTree* lclVarTree = tree->AsOp()->gtOp2;
GenTree* consTree = tree->AsOp()->gtOp1;
GenTree* op1 = consTree;
GenTree* op2 = lclVarTree;
if (!op2->OperIs(GT_LCL_VAR) || !varTypeIsIntegral(op2))
{
return tree;
}
int foldCount = 0;
unsigned lclNum = op2->AsLclVarCommon()->GetLclNum();
// Search for pattern of shape ADD(ADD(ADD(lclNum, lclNum), lclNum), lclNum).
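// For example, "i + i + i + i" reaches here as ADD(ADD(ADD(V0, V0), V0), V0);
// the loop below counts four occurrences of V0, so foldCount becomes 4.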
while (true)
{
// ADD(lclNum, lclNum), end of tree
if (op1->OperIs(GT_LCL_VAR) && op1->AsLclVarCommon()->GetLclNum() == lclNum && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount += 2;
break;
}
// ADD(ADD(X, Y), lclNum), keep descending
else if (op1->OperIs(GT_ADD) && !op1->gtOverflow() && op2->OperIs(GT_LCL_VAR) &&
op2->AsLclVarCommon()->GetLclNum() == lclNum)
{
foldCount++;
op2 = op1->AsOp()->gtOp2;
op1 = op1->AsOp()->gtOp1;
}
// Any other case is a pattern we won't attempt to fold for now.
else
{
return tree;
}
}
// V0 + V0 ... + V0 becomes V0 * foldCount, where postorder transform will optimize
// accordingly
consTree->BashToConst(foldCount, tree->TypeGet());
GenTree* morphed = gtNewOperNode(GT_MUL, tree->TypeGet(), lclVarTree, consTree);
DEBUG_DESTROY_NODE(tree);
return morphed;
}
| 1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
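A minimal sketch (my own illustration, not code from this PR) of the branch-free
computation that the new `and`/`negs`/`and`/`csneg` sequence mirrors for a signed
remainder by a power of two. `RemainderPow2` and its parameter names are made up
here, and the sketch assumes `d` is a power of two and `a != INT_MIN`:

```c
#include <assert.h>

// Signed a % d for a power-of-two d, without a division.
int RemainderPow2(int a, int d)
{
    assert(d > 0 && (d & (d - 1)) == 0); // d must be a power of two
    int mask = d - 1;
    int pos = a & mask;                  // remainder when a >= 0
    int neg = -((-a) & mask);            // remainder when a < 0
    return (a >= 0) ? pos : neg;         // e.g. RemainderPow2(-5, 16) == -5
}
```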
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/pal/tests/palsuite/c_runtime/vsprintf/vsprintf.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: vsprintf.h
**
** Purpose: Helper functions for the vsprintf tests.
**
**
**===================================================================*/
#ifndef __VSPRINTF_H__
#define __VSPRINTF_H__
/* These functions leak memory a lot. C'est la vie. */
inline int testvsp(char* buf, size_t buffSize, const char* format, ...)
{
int retVal;
va_list arglist;
va_start(arglist, format);
retVal = _vsnprintf_s(buf, buffSize, _TRUNCATE, format, arglist);
va_end(arglist);
return (retVal);
}
inline void DoStrTest_vsprintf(const char *formatstr, char* param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert string \"%s\" into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
param, formatstr, checkstr, buf);
}
}
#define DoStrTest DoStrTest_vsprintf
inline void DoWStrTest_vsprintf(const char *formatstr, WCHAR* param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert wide string \"%s\" into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
convertC(param), formatstr, checkstr, buf);
}
}
#define DoWStrTest DoWStrTest_vsprintf
inline void DoCharTest_vsprintf(const char *formatstr, char param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert char \'%c\' (%d) into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
param, param, formatstr, checkstr, buf);
}
}
#define DoCharTest DoCharTest_vsprintf
inline void DoWCharTest_vsprintf(const char *formatstr, WCHAR param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert wide char \'%c\' (%d) into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
(char)param, param, formatstr, checkstr, buf);
}
}
#define DoWCharTest DoWCharTest_vsprintf
inline void DoNumTest_vsprintf(const char *formatstr, int value, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert %#x into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
value, formatstr, checkstr, buf);
}
}
#define DoNumTest DoNumTest_vsprintf
inline void DoI64Test_vsprintf(const char *formatstr, INT64 value, char *valuestr, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
valuestr, formatstr, checkstr, buf);
}
}
#define DoI64Test DoI64Test_vsprintf
inline void DoDoubleTest_vsprintf(const char *formatstr, double value, const char *checkstr1, char
*checkstr2)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 &&
memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0)
{
Fail("ERROR: failed to insert %f into \"%s\"\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n",
value, formatstr, checkstr1, checkstr2, buf);
}
}
#define DoDoubleTest DoDoubleTest_vsprintf
/*FROM TEST 9*/
inline void DoArgumentPrecTest_vsprintf(const char *formatstr, int precision, void *param,
char *paramstr, const char *checkstr1, const char *checkstr2)
{
char buf[256];
testvsp(buf, ARRAY_SIZE(buf), formatstr, precision, param);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 &&
memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n", paramstr, formatstr,
precision, checkstr1, checkstr2, buf);
}
}
#define DoArgumentPrecTest DoArgumentPrecTest_vsprintf
inline void DoArgumentPrecDoubleTest_vsprintf(const char *formatstr, int precision, double param,
const char *checkstr1, const char *checkstr2)
{
char buf[256];
testvsp(buf, ARRAY_SIZE(buf), formatstr, precision, param);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 &&
memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0)
{
Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n", param, formatstr,
precision, checkstr1, checkstr2, buf);
}
}
#define DoArgumentPrecDoubleTest DoArgumentPrecDoubleTest_vsprintf
/*FROM TEST4*/
inline void DoPointerTest_vsprintf(const char *formatstr, void* param, char* paramstr,
const char *checkstr1)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1))
{
Fail("ERROR: failed to insert %s into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
paramstr, formatstr, checkstr1, buf);
}
}
#define DoPointerTest DoPointerTest_vsprintf
inline void DoI64DoubleTest_vsprintf(const char *formatstr, INT64 value, char *valuestr,
const char *checkstr1)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\"\n"
"Expected \"%s\", got \"%s\".\n",
valuestr, formatstr, checkstr1, buf);
}
}
#define DoI64DoubleTest DoI64DoubleTest_vsprintf
inline void DoTest_vsprintf(const char *formatstr, int param, const char *checkstr)
{
char buf[256] = { 0 };
int n = -1;
testvsp(buf, ARRAY_SIZE(buf), formatstr, &n);
if (n != param)
{
Fail("ERROR: Expected count parameter to resolve to %d, got %X\n",
param, n);
}
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf);
}
}
#define DoTest DoTest_vsprintf
inline void DoShortTest_vsprintf(const char *formatstr, int param, const char *checkstr)
{
char buf[256] = { 0 };
short int n = -1;
testvsp(buf, ARRAY_SIZE(buf), formatstr, &n);
if (n != param)
{
Fail("ERROR: Expected count parameter to resolve to %d, got %X\n",
param, n);
}
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf);
}
}
#define DoShortTest DoShortTest_vsprintf
#endif
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: vsprintf.h
**
** Purpose: Helper functions for the vsprintf tests.
**
**
**===================================================================*/
#ifndef __VSPRINTF_H__
#define __VSPRINTF_H__
/* These functions leak memory a lot. C'est la vie. */
inline int testvsp(char* buf, size_t buffSize, const char* format, ...)
{
int retVal;
va_list arglist;
va_start(arglist, format);
retVal = _vsnprintf_s(buf, buffSize, _TRUNCATE, format, arglist);
va_end(arglist);
return (retVal);
}
inline void DoStrTest_vsprintf(const char *formatstr, char* param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert string \"%s\" into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
param, formatstr, checkstr, buf);
}
}
#define DoStrTest DoStrTest_vsprintf
inline void DoWStrTest_vsprintf(const char *formatstr, WCHAR* param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert wide string \"%s\" into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
convertC(param), formatstr, checkstr, buf);
}
}
#define DoWStrTest DoWStrTest_vsprintf
inline void DoCharTest_vsprintf(const char *formatstr, char param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert char \'%c\' (%d) into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
param, param, formatstr, checkstr, buf);
}
}
#define DoCharTest DoCharTest_vsprintf
inline void DoWCharTest_vsprintf(const char *formatstr, WCHAR param, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert wide char \'%c\' (%d) into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
(char)param, param, formatstr, checkstr, buf);
}
}
#define DoWCharTest DoWCharTest_vsprintf
inline void DoNumTest_vsprintf(const char *formatstr, int value, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert %#x into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
value, formatstr, checkstr, buf);
}
}
#define DoNumTest DoNumTest_vsprintf
inline void DoI64Test_vsprintf(const char *formatstr, INT64 value, char *valuestr, const char *checkstr)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
valuestr, formatstr, checkstr, buf);
}
}
#define DoI64Test DoI64Test_vsprintf
inline void DoDoubleTest_vsprintf(const char *formatstr, double value, const char *checkstr1, char
*checkstr2)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 &&
memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0)
{
Fail("ERROR: failed to insert %f into \"%s\"\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n",
value, formatstr, checkstr1, checkstr2, buf);
}
}
#define DoDoubleTest DoDoubleTest_vsprintf
/*FROM TEST 9*/
inline void DoArgumentPrecTest_vsprintf(const char *formatstr, int precision, void *param,
char *paramstr, const char *checkstr1, const char *checkstr2)
{
char buf[256];
testvsp(buf, ARRAY_SIZE(buf), formatstr, precision, param);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 &&
memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n", paramstr, formatstr,
precision, checkstr1, checkstr2, buf);
}
}
#define DoArgumentPrecTest DoArgumentPrecTest_vsprintf
inline void DoArgumentPrecDoubleTest_vsprintf(const char *formatstr, int precision, double param,
const char *checkstr1, const char *checkstr2)
{
char buf[256];
testvsp(buf, ARRAY_SIZE(buf), formatstr, precision, param);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0 &&
memcmp(buf, checkstr2, strlen(checkstr2) + 1) != 0)
{
Fail("ERROR: failed to insert %f into \"%s\" with precision %d\n"
"Expected \"%s\" or \"%s\", got \"%s\".\n", param, formatstr,
precision, checkstr1, checkstr2, buf);
}
}
#define DoArgumentPrecDoubleTest DoArgumentPrecDoubleTest_vsprintf
/*FROM TEST4*/
inline void DoPointerTest_vsprintf(const char *formatstr, void* param, char* paramstr,
const char *checkstr1)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, param);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1))
{
Fail("ERROR: failed to insert %s into \"%s\"\n"
"Expected \"%s\" got \"%s\".\n",
paramstr, formatstr, checkstr1, buf);
}
}
#define DoPointerTest DoPointerTest_vsprintf
inline void DoI64DoubleTest_vsprintf(const char *formatstr, INT64 value, char *valuestr,
const char *checkstr1)
{
char buf[256] = { 0 };
testvsp(buf, ARRAY_SIZE(buf), formatstr, value);
if (memcmp(buf, checkstr1, strlen(checkstr1) + 1) != 0)
{
Fail("ERROR: failed to insert %s into \"%s\"\n"
"Expected \"%s\", got \"%s\".\n",
valuestr, formatstr, checkstr1, buf);
}
}
#define DoI64DoubleTest DoI64DoubleTest_vsprintf
inline void DoTest_vsprintf(const char *formatstr, int param, const char *checkstr)
{
char buf[256] = { 0 };
int n = -1;
testvsp(buf, ARRAY_SIZE(buf), formatstr, &n);
if (n != param)
{
Fail("ERROR: Expected count parameter to resolve to %d, got %X\n",
param, n);
}
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf);
}
}
#define DoTest DoTest_vsprintf
inline void DoShortTest_vsprintf(const char *formatstr, int param, const char *checkstr)
{
char buf[256] = { 0 };
short int n = -1;
testvsp(buf, ARRAY_SIZE(buf), formatstr, &n);
if (n != param)
{
Fail("ERROR: Expected count parameter to resolve to %d, got %X\n",
param, n);
}
if (memcmp(buf, checkstr, strlen(buf) + 1) != 0)
{
Fail("ERROR: Expected \"%s\" got \"%s\".\n", checkstr, buf);
}
}
#define DoShortTest DoShortTest_vsprintf
#endif
| -1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/vm/i386/cgencpu.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// CGENX86.H -
//
// Various helper routines for generating x86 assembly code.
//
// DO NOT INCLUDE THIS FILE DIRECTLY - ALWAYS USE CGENSYS.H INSTEAD
//
#ifndef TARGET_X86
#error Should only include "cgenx86.h" for X86 builds
#endif // TARGET_X86
#ifndef __cgenx86_h__
#define __cgenx86_h__
#include "utilcode.h"
// Given a return address retrieved during stackwalk,
// this is the offset by which it should be decremented to land somewhere in a call instruction.
#define STACKWALK_CONTROLPC_ADJUST_OFFSET 1
// preferred alignment for data
#define DATA_ALIGNMENT 4
class MethodDesc;
class FramedMethodFrame;
class Module;
class ComCallMethodDesc;
class BaseDomain;
// CPU-dependent functions
Stub * GenerateInitPInvokeFrameHelper();
#ifdef FEATURE_STUBS_AS_IL
EXTERN_C void SinglecastDelegateInvokeStub();
#endif // FEATURE_STUBS_AS_IL
#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn)
//**********************************************************************
// To be used with GetSpecificCpuInfo()
#define CPU_X86_FAMILY(cpuType) (((cpuType) & 0x0F00) >> 8)
#define CPU_X86_MODEL(cpuType) (((cpuType) & 0x00F0) >> 4)
// Stepping is masked out by GetSpecificCpuInfo()
// #define CPU_X86_STEPPING(cpuType) (((cpuType) & 0x000F) )
#define CPU_X86_USE_CMOV(cpuFeat) ((cpuFeat & 0x00008001) == 0x00008001)
#define CPU_X86_USE_SSE2(cpuFeat) ((cpuFeat & 0x04000000) == 0x04000000)
// Values for CPU_X86_FAMILY(cpuType)
#define CPU_X86_486 4
#define CPU_X86_PENTIUM 5
#define CPU_X86_PENTIUM_PRO 6
#define CPU_X86_PENTIUM_4 0xF
// Values for CPU_X86_MODEL(cpuType) for CPU_X86_PENTIUM_PRO
#define CPU_X86_MODEL_PENTIUM_PRO_BANIAS 9 // Pentium M (Mobile PPro with P4 features)
#define COMMETHOD_PREPAD 8 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
#ifdef FEATURE_COMINTEROP
#define COMMETHOD_CALL_PRESTUB_SIZE 5 // x86: CALL(E8) xx xx xx xx
#define COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET 1 // the offset of the call target address inside the prestub
#endif // FEATURE_COMINTEROP
#define STACK_ALIGN_SIZE 4
#define JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a jump instruction
#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a back to back jump instruction
#ifdef FEATURE_EH_FUNCLETS
#define USE_INDIRECT_CODEHEADER
#endif // FEATURE_EH_FUNCLETS
#define HAS_COMPACT_ENTRYPOINTS 1
// Needed for PInvoke inlining in ngened images
#define HAS_NDIRECT_IMPORT_PRECODE 1
#define HAS_FIXUP_PRECODE 1
// ThisPtrRetBufPrecode is necessary for closed delegates over static methods with a return buffer
#define HAS_THISPTR_RETBUF_PRECODE 1
#define CODE_SIZE_ALIGN 4
#define CACHE_LINE_SIZE 32 // As per Intel Optimization Manual the cache line size is 32 bytes
#define LOG2SLOT LOG2_PTRSIZE
#define ENREGISTERED_RETURNTYPE_MAXSIZE 8
#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 4
#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter
//**********************************************************************
// Parameter size
//**********************************************************************
inline unsigned StackElemSize(unsigned parmSize, bool isValueType = false /* unused */, bool isFloatHfa = false /* unused */)
{
const unsigned stackSlotSize = 4;
return ALIGN_UP(parmSize, stackSlotSize);
}
#include "stublinkerx86.h"
//**********************************************************************
// Frames
//**********************************************************************
//--------------------------------------------------------------------
// This represents some of the FramedMethodFrame fields that are
// stored at negative offsets.
//--------------------------------------------------------------------
#define ENUM_ARGUMENT_AND_SCRATCH_REGISTERS() \
ARGUMENT_AND_SCRATCH_REGISTER(Eax) \
ARGUMENT_AND_SCRATCH_REGISTER(Ecx) \
ARGUMENT_AND_SCRATCH_REGISTER(Edx)
#define ENUM_CALLEE_SAVED_REGISTERS() \
CALLEE_SAVED_REGISTER(Edi) \
CALLEE_SAVED_REGISTER(Esi) \
CALLEE_SAVED_REGISTER(Ebx) \
CALLEE_SAVED_REGISTER(Ebp)
typedef DPTR(struct CalleeSavedRegisters) PTR_CalleeSavedRegisters;
struct CalleeSavedRegisters {
#define CALLEE_SAVED_REGISTER(regname) INT32 regname;
ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
};
//--------------------------------------------------------------------
// This represents the arguments that are stored in volatile registers.
// This should not overlap the CalleeSavedRegisters since those are already
// saved separately and it would be wasteful to save the same register twice.
// If we do use a non-volatile register as an argument, then the ArgIterator
// will probably have to communicate this back to the PromoteCallerStack
// routine to avoid a double promotion.
//--------------------------------------------------------------------
#define ENUM_ARGUMENT_REGISTERS() \
ARGUMENT_REGISTER(ECX) \
ARGUMENT_REGISTER(EDX)
#define ENUM_ARGUMENT_REGISTERS_BACKWARD() \
ARGUMENT_REGISTER(EDX) \
ARGUMENT_REGISTER(ECX)
typedef DPTR(struct ArgumentRegisters) PTR_ArgumentRegisters;
struct ArgumentRegisters {
#define ARGUMENT_REGISTER(regname) INT32 regname;
ENUM_ARGUMENT_REGISTERS_BACKWARD()
#undef ARGUMENT_REGISTER
};
#define NUM_ARGUMENT_REGISTERS 2
#define SCRATCH_REGISTER_X86REG kEAX
#define THIS_REG ECX
#define THIS_kREG kECX
#define ARGUMENT_REG1 ECX
#define ARGUMENT_REG2 EDX
// forward decl
struct REGDISPLAY;
typedef REGDISPLAY *PREGDISPLAY;
#ifndef FEATURE_EH_FUNCLETS
// Sufficient context for Try/Catch restoration.
struct EHContext {
INT32 Eax;
INT32 Ebx;
INT32 Ecx;
INT32 Edx;
INT32 Esi;
INT32 Edi;
INT32 Ebp;
INT32 Esp;
INT32 Eip;
void Setup(PCODE resumePC, PREGDISPLAY regs);
void UpdateFrame(PREGDISPLAY regs);
inline TADDR GetSP() {
LIMITED_METHOD_CONTRACT;
return (TADDR)Esp;
}
inline void SetSP(LPVOID esp) {
LIMITED_METHOD_CONTRACT;
Esp = (INT32)(size_t)esp;
}
inline LPVOID GetFP() {
LIMITED_METHOD_CONTRACT;
return (LPVOID)(UINT_PTR)Ebp;
}
inline void SetArg(LPVOID arg) {
LIMITED_METHOD_CONTRACT;
Eax = (INT32)(size_t)arg;
}
inline void Init()
{
LIMITED_METHOD_CONTRACT;
Eax = 0;
Ebx = 0;
Ecx = 0;
Edx = 0;
Esi = 0;
Edi = 0;
Ebp = 0;
Esp = 0;
Eip = 0;
}
};
#endif // !FEATURE_EH_FUNCLETS
#define ARGUMENTREGISTERS_SIZE sizeof(ArgumentRegisters)
//**********************************************************************
// Exception handling
//**********************************************************************
inline PCODE GetIP(const CONTEXT * context) {
LIMITED_METHOD_DAC_CONTRACT;
return PCODE(context->Eip);
}
inline void SetIP(CONTEXT *context, PCODE eip) {
LIMITED_METHOD_DAC_CONTRACT;
context->Eip = (DWORD)eip;
}
inline TADDR GetSP(const CONTEXT * context) {
LIMITED_METHOD_DAC_CONTRACT;
return (TADDR)(context->Esp);
}
EXTERN_C LPVOID STDCALL GetCurrentSP();
inline void SetSP(CONTEXT *context, TADDR esp) {
LIMITED_METHOD_DAC_CONTRACT;
context->Esp = (DWORD)esp;
}
inline void SetFP(CONTEXT *context, TADDR ebp) {
LIMITED_METHOD_DAC_CONTRACT;
context->Ebp = (INT32)ebp;
}
inline TADDR GetFP(const CONTEXT * context)
{
LIMITED_METHOD_DAC_CONTRACT;
return (TADDR)context->Ebp;
}
// Get Rel32 destination, emit jumpStub if necessary
inline INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod = NULL, LoaderAllocator *pLoaderAllocator = NULL)
{
// We do not need jump stubs on i386
LIMITED_METHOD_CONTRACT;
TADDR baseAddr = (TADDR)pRel32 + 4;
return (INT32)(target - baseAddr);
}
#ifdef FEATURE_COMINTEROP
inline void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
WRAPPER_NO_CONTRACT;
BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
pBufferRW[0] = X86_INSTR_CALL_REL32; //CALLNEAR32
*((LPVOID*)(1+pBufferRW)) = (LPVOID) (((LPBYTE)target) - (pBufferRX+5));
_ASSERTE(IS_ALIGNED(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
*((SSIZE_T*)(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == ((LPBYTE)target - (LPBYTE)pCOMMethodRX));
}
#endif // FEATURE_COMINTEROP
//------------------------------------------------------------------------
WORD GetUnpatchedCodeData(LPCBYTE pAddr);
//------------------------------------------------------------------------
inline WORD GetUnpatchedOpcodeWORD(LPCBYTE pAddr)
{
WRAPPER_NO_CONTRACT;
if (CORDebuggerAttached())
{
return GetUnpatchedCodeData(pAddr);
}
else
{
return *((WORD *)pAddr);
}
}
//------------------------------------------------------------------------
inline BYTE GetUnpatchedOpcodeBYTE(LPCBYTE pAddr)
{
WRAPPER_NO_CONTRACT;
if (CORDebuggerAttached())
{
return (BYTE) GetUnpatchedCodeData(pAddr);
}
else
{
return *pAddr;
}
}
//------------------------------------------------------------------------
// The following must be a distinguishable set of instruction sequences for
// various stub dispatch calls.
//
// An x86 JIT which uses full stub dispatch must generate only
// the following stub dispatch calls:
//
// (1) isCallRelativeIndirect:
// call dword ptr [rel32] ; FF 15 ---rel32----
// (2) isCallRelative:
// call abc ; E8 ---rel32----
// (3) isCallRegisterIndirect:
// 3-byte nop ;
// call dword ptr [eax] ; FF 10
//
// NOTE: You must be sure that pRetAddr is a true return address for
// a stub dispatch call.
BOOL isCallRelativeIndirect(const BYTE *pRetAddr);
BOOL isCallRelative(const BYTE *pRetAddr);
BOOL isCallRegisterIndirect(const BYTE *pRetAddr);
inline BOOL isCallRelativeIndirect(const BYTE *pRetAddr)
{
LIMITED_METHOD_CONTRACT;
BOOL fRet = (GetUnpatchedOpcodeWORD(&pRetAddr[-6]) == X86_INSTR_CALL_IND);
_ASSERTE(!fRet || !isCallRelative(pRetAddr));
_ASSERTE(!fRet || !isCallRegisterIndirect(pRetAddr));
return fRet;
}
inline BOOL isCallRelative(const BYTE *pRetAddr)
{
LIMITED_METHOD_CONTRACT;
BOOL fRet = (GetUnpatchedOpcodeBYTE(&pRetAddr[-5]) == X86_INSTR_CALL_REL32);
_ASSERTE(!fRet || !isCallRelativeIndirect(pRetAddr));
_ASSERTE(!fRet || !isCallRegisterIndirect(pRetAddr));
return fRet;
}
inline BOOL isCallRegisterIndirect(const BYTE *pRetAddr)
{
LIMITED_METHOD_CONTRACT;
BOOL fRet = (GetUnpatchedOpcodeWORD(&pRetAddr[-5]) == X86_INSTR_NOP3_1)
&& (GetUnpatchedOpcodeBYTE(&pRetAddr[-3]) == X86_INSTR_NOP3_3)
&& (GetUnpatchedOpcodeWORD(&pRetAddr[-2]) == X86_INSTR_CALL_IND_EAX);
_ASSERTE(!fRet || !isCallRelative(pRetAddr));
_ASSERTE(!fRet || !isCallRelativeIndirect(pRetAddr));
return fRet;
}
//------------------------------------------------------------------------
inline void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
pBufferRW[0] = X86_INSTR_JMP_REL32; //JUMPNEAR32
*((LPVOID*)(1+pBufferRW)) = (LPVOID) (((LPBYTE)target) - (pBufferRX+5));
}
//------------------------------------------------------------------------
inline void emitJumpInd(LPBYTE pBuffer, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
*((WORD*)pBuffer) = X86_INSTR_JMP_IND; // 0x25FF jmp dword ptr[addr32]
*((LPVOID*)(2+pBuffer)) = target;
}
//------------------------------------------------------------------------
inline PCODE isJump(PCODE pCode)
{
LIMITED_METHOD_DAC_CONTRACT;
return *PTR_BYTE(pCode) == X86_INSTR_JMP_REL32;
}
//------------------------------------------------------------------------
// Given the same pBuffer that was used by emitJump this method
// decodes the instructions and returns the jump target
inline PCODE decodeJump(PCODE pCode)
{
LIMITED_METHOD_DAC_CONTRACT;
CONSISTENCY_CHECK(*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32);
return rel32Decode(pCode+1);
}
//
// On IA64 back to back jumps should be separated by a nop bundle to get
// the best performance from the hardware's branch prediction logic.
// For all other platforms back to back jumps don't require anything special
// That is why we have these two wrapper functions that call emitJump and decodeJump
//
//------------------------------------------------------------------------
inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
emitJump(pBufferRX, pBufferRW, target);
}
//------------------------------------------------------------------------
inline PCODE isBackToBackJump(PCODE pBuffer)
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
return isJump(pBuffer);
}
//------------------------------------------------------------------------
inline PCODE decodeBackToBackJump(PCODE pBuffer)
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
return decodeJump(pBuffer);
}
EXTERN_C void __stdcall setFPReturn(int fpSize, INT64 retVal);
EXTERN_C void __stdcall getFPReturn(int fpSize, INT64 *pretval);
// SEH info forward declarations
#include <pshpack1.h>
struct DECLSPEC_ALIGN(4) UMEntryThunkCode
{
BYTE m_alignpad[2]; // used to guarantee alignment of backpatched portion
BYTE m_movEAX; //MOV EAX,imm32
LPVOID m_uet; // pointer to start of this structure
BYTE m_jmp; //JMP NEAR32
const BYTE * m_execstub; // pointer to destination code // make sure the backpatched portion is dword aligned.
void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
{
LIMITED_METHOD_CONTRACT;
return (LPCBYTE)&m_movEAX;
}
static int GetEntryPointOffset()
{
LIMITED_METHOD_CONTRACT;
return 2;
}
};
#include <poppack.h>
struct HijackArgs
{
DWORD FPUState[3]; // 12 bytes for FPU state (10 bytes for FP top-of-stack + 2 bytes padding)
DWORD Edi;
DWORD Esi;
DWORD Ebx;
DWORD Edx;
DWORD Ecx;
union
{
DWORD Eax;
size_t ReturnValue[1];
};
DWORD Ebp;
union
{
DWORD Eip;
size_t ReturnAddress;
};
};
// ClrFlushInstructionCache is used when we want to call FlushInstructionCache
// for a specific architecture in the common code, but not for other architectures.
// On IA64 ClrFlushInstructionCache calls the Kernel FlushInstructionCache function
// to flush the instruction cache.
// We call ClrFlushInstructionCache whenever we create or modify code in the heap.
// Currently ClrFlushInstructionCache has no effect on X86
//
inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
{
// FlushInstructionCache(GetCurrentProcess(), pCodeAddr, sizeOfCode);
MemoryBarrier();
return TRUE;
}
//
// JIT HELPER ALIASING FOR PORTABILITY.
//
// Create alias for optimized implementations of helpers provided on this platform
//
// optimized static helpers generated dynamically at runtime
// #define JIT_GetSharedGCStaticBase
// #define JIT_GetSharedNonGCStaticBase
// #define JIT_GetSharedGCStaticBaseNoCtor
// #define JIT_GetSharedNonGCStaticBaseNoCtor
#ifndef TARGET_UNIX
#define JIT_NewCrossContext JIT_NewCrossContext
#endif // TARGET_UNIX
#endif // __cgenx86_h__
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// CGENX86.H -
//
// Various helper routines for generating x86 assembly code.
//
// DO NOT INCLUDE THIS FILE DIRECTLY - ALWAYS USE CGENSYS.H INSTEAD
//
#ifndef TARGET_X86
#error Should only include "cgenx86.h" for X86 builds
#endif // TARGET_X86
#ifndef __cgenx86_h__
#define __cgenx86_h__
#include "utilcode.h"
// Given a return address retrieved during stackwalk,
// this is the offset by which it should be decremented to land somewhere in a call instruction.
#define STACKWALK_CONTROLPC_ADJUST_OFFSET 1
// preferred alignment for data
#define DATA_ALIGNMENT 4
class MethodDesc;
class FramedMethodFrame;
class Module;
class ComCallMethodDesc;
class BaseDomain;
// CPU-dependent functions
Stub * GenerateInitPInvokeFrameHelper();
#ifdef FEATURE_STUBS_AS_IL
EXTERN_C void SinglecastDelegateInvokeStub();
#endif // FEATURE_STUBS_AS_IL
#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn)
//**********************************************************************
// To be used with GetSpecificCpuInfo()
#define CPU_X86_FAMILY(cpuType) (((cpuType) & 0x0F00) >> 8)
#define CPU_X86_MODEL(cpuType) (((cpuType) & 0x00F0) >> 4)
// Stepping is masked out by GetSpecificCpuInfo()
// #define CPU_X86_STEPPING(cpuType) (((cpuType) & 0x000F) )
#define CPU_X86_USE_CMOV(cpuFeat) ((cpuFeat & 0x00008001) == 0x00008001)
#define CPU_X86_USE_SSE2(cpuFeat) ((cpuFeat & 0x04000000) == 0x04000000)
// Values for CPU_X86_FAMILY(cpuType)
#define CPU_X86_486 4
#define CPU_X86_PENTIUM 5
#define CPU_X86_PENTIUM_PRO 6
#define CPU_X86_PENTIUM_4 0xF
// Values for CPU_X86_MODEL(cpuType) for CPU_X86_PENTIUM_PRO
#define CPU_X86_MODEL_PENTIUM_PRO_BANIAS 9 // Pentium M (Mobile PPro with P4 features)
#define COMMETHOD_PREPAD 8 // # extra bytes to allocate in addition to sizeof(ComCallMethodDesc)
#ifdef FEATURE_COMINTEROP
#define COMMETHOD_CALL_PRESTUB_SIZE 5 // x86: CALL(E8) xx xx xx xx
#define COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET 1 // the offset of the call target address inside the prestub
#endif // FEATURE_COMINTEROP
#define STACK_ALIGN_SIZE 4
#define JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a jump instruction
#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a back to back jump instruction
#ifdef FEATURE_EH_FUNCLETS
#define USE_INDIRECT_CODEHEADER
#endif // FEATURE_EH_FUNCLETS
#define HAS_COMPACT_ENTRYPOINTS 1
// Needed for PInvoke inlining in ngened images
#define HAS_NDIRECT_IMPORT_PRECODE 1
#define HAS_FIXUP_PRECODE 1
// ThisPtrRetBufPrecode one is necessary for closed delegates over static methods with return buffer
#define HAS_THISPTR_RETBUF_PRECODE 1
#define CODE_SIZE_ALIGN 4
#define CACHE_LINE_SIZE 32 // As per Intel Optimization Manual the cache line size is 32 bytes
#define LOG2SLOT LOG2_PTRSIZE
#define ENREGISTERED_RETURNTYPE_MAXSIZE 8
#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 4
#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter
//**********************************************************************
// Parameter size
//**********************************************************************
inline unsigned StackElemSize(unsigned parmSize, bool isValueType = false /* unused */, bool isFloatHfa = false /* unused */)
{
const unsigned stackSlotSize = 4;
return ALIGN_UP(parmSize, stackSlotSize);
}
#include "stublinkerx86.h"
//**********************************************************************
// Frames
//**********************************************************************
//--------------------------------------------------------------------
// This represents some of the FramedMethodFrame fields that are
// stored at negative offsets.
//--------------------------------------------------------------------
#define ENUM_ARGUMENT_AND_SCRATCH_REGISTERS() \
ARGUMENT_AND_SCRATCH_REGISTER(Eax) \
ARGUMENT_AND_SCRATCH_REGISTER(Ecx) \
ARGUMENT_AND_SCRATCH_REGISTER(Edx)
#define ENUM_CALLEE_SAVED_REGISTERS() \
CALLEE_SAVED_REGISTER(Edi) \
CALLEE_SAVED_REGISTER(Esi) \
CALLEE_SAVED_REGISTER(Ebx) \
CALLEE_SAVED_REGISTER(Ebp)
typedef DPTR(struct CalleeSavedRegisters) PTR_CalleeSavedRegisters;
struct CalleeSavedRegisters {
#define CALLEE_SAVED_REGISTER(regname) INT32 regname;
ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
};
//--------------------------------------------------------------------
// This represents the arguments that are stored in volatile registers.
// This should not overlap the CalleeSavedRegisters since those are already
// saved separately and it would be wasteful to save the same register twice.
// If we do use a non-volatile register as an argument, then the ArgIterator
// will probably have to communicate this back to the PromoteCallerStack
// routine to avoid a double promotion.
//--------------------------------------------------------------------
#define ENUM_ARGUMENT_REGISTERS() \
ARGUMENT_REGISTER(ECX) \
ARGUMENT_REGISTER(EDX)
#define ENUM_ARGUMENT_REGISTERS_BACKWARD() \
ARGUMENT_REGISTER(EDX) \
ARGUMENT_REGISTER(ECX)
typedef DPTR(struct ArgumentRegisters) PTR_ArgumentRegisters;
struct ArgumentRegisters {
#define ARGUMENT_REGISTER(regname) INT32 regname;
ENUM_ARGUMENT_REGISTERS_BACKWARD()
#undef ARGUMENT_REGISTER
};
#define NUM_ARGUMENT_REGISTERS 2
#define SCRATCH_REGISTER_X86REG kEAX
#define THIS_REG ECX
#define THIS_kREG kECX
#define ARGUMENT_REG1 ECX
#define ARGUMENT_REG2 EDX
// forward decl
struct REGDISPLAY;
typedef REGDISPLAY *PREGDISPLAY;
#ifndef FEATURE_EH_FUNCLETS
// Sufficient context for Try/Catch restoration.
struct EHContext {
INT32 Eax;
INT32 Ebx;
INT32 Ecx;
INT32 Edx;
INT32 Esi;
INT32 Edi;
INT32 Ebp;
INT32 Esp;
INT32 Eip;
void Setup(PCODE resumePC, PREGDISPLAY regs);
void UpdateFrame(PREGDISPLAY regs);
inline TADDR GetSP() {
LIMITED_METHOD_CONTRACT;
return (TADDR)Esp;
}
inline void SetSP(LPVOID esp) {
LIMITED_METHOD_CONTRACT;
Esp = (INT32)(size_t)esp;
}
inline LPVOID GetFP() {
LIMITED_METHOD_CONTRACT;
return (LPVOID)(UINT_PTR)Ebp;
}
inline void SetArg(LPVOID arg) {
LIMITED_METHOD_CONTRACT;
Eax = (INT32)(size_t)arg;
}
inline void Init()
{
LIMITED_METHOD_CONTRACT;
Eax = 0;
Ebx = 0;
Ecx = 0;
Edx = 0;
Esi = 0;
Edi = 0;
Ebp = 0;
Esp = 0;
Eip = 0;
}
};
#endif // !FEATURE_EH_FUNCLETS
#define ARGUMENTREGISTERS_SIZE sizeof(ArgumentRegisters)
//**********************************************************************
// Exception handling
//**********************************************************************
inline PCODE GetIP(const CONTEXT * context) {
LIMITED_METHOD_DAC_CONTRACT;
return PCODE(context->Eip);
}
inline void SetIP(CONTEXT *context, PCODE eip) {
LIMITED_METHOD_DAC_CONTRACT;
context->Eip = (DWORD)eip;
}
inline TADDR GetSP(const CONTEXT * context) {
LIMITED_METHOD_DAC_CONTRACT;
return (TADDR)(context->Esp);
}
EXTERN_C LPVOID STDCALL GetCurrentSP();
inline void SetSP(CONTEXT *context, TADDR esp) {
LIMITED_METHOD_DAC_CONTRACT;
context->Esp = (DWORD)esp;
}
inline void SetFP(CONTEXT *context, TADDR ebp) {
LIMITED_METHOD_DAC_CONTRACT;
context->Ebp = (INT32)ebp;
}
inline TADDR GetFP(const CONTEXT * context)
{
LIMITED_METHOD_DAC_CONTRACT;
return (TADDR)context->Ebp;
}
// Get Rel32 destination, emit jumpStub if necessary
inline INT32 rel32UsingJumpStub(INT32 UNALIGNED * pRel32, PCODE target, MethodDesc *pMethod = NULL, LoaderAllocator *pLoaderAllocator = NULL)
{
// We do not need jump stubs on i386
LIMITED_METHOD_CONTRACT;
TADDR baseAddr = (TADDR)pRel32 + 4;
return (INT32)(target - baseAddr);
}
#ifdef FEATURE_COMINTEROP
inline void emitCOMStubCall (ComCallMethodDesc *pCOMMethodRX, ComCallMethodDesc *pCOMMethodRW, PCODE target)
{
WRAPPER_NO_CONTRACT;
BYTE *pBufferRW = (BYTE*)pCOMMethodRW - COMMETHOD_CALL_PRESTUB_SIZE;
BYTE *pBufferRX = (BYTE*)pCOMMethodRX - COMMETHOD_CALL_PRESTUB_SIZE;
pBufferRW[0] = X86_INSTR_CALL_REL32; //CALLNEAR32
*((LPVOID*)(1+pBufferRW)) = (LPVOID) (((LPBYTE)target) - (pBufferRX+5));
_ASSERTE(IS_ALIGNED(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET, sizeof(void*)) &&
*((SSIZE_T*)(pBufferRX + COMMETHOD_CALL_PRESTUB_ADDRESS_OFFSET)) == ((LPBYTE)target - (LPBYTE)pCOMMethodRX));
}
#endif // FEATURE_COMINTEROP
//------------------------------------------------------------------------
WORD GetUnpatchedCodeData(LPCBYTE pAddr);
//------------------------------------------------------------------------
inline WORD GetUnpatchedOpcodeWORD(LPCBYTE pAddr)
{
WRAPPER_NO_CONTRACT;
if (CORDebuggerAttached())
{
return GetUnpatchedCodeData(pAddr);
}
else
{
return *((WORD *)pAddr);
}
}
//------------------------------------------------------------------------
inline BYTE GetUnpatchedOpcodeBYTE(LPCBYTE pAddr)
{
WRAPPER_NO_CONTRACT;
if (CORDebuggerAttached())
{
return (BYTE) GetUnpatchedCodeData(pAddr);
}
else
{
return *pAddr;
}
}
//------------------------------------------------------------------------
// The following must be a distinguishable set of instruction sequences for
// various stub dispatch calls.
//
// An x86 JIT which uses full stub dispatch must generate only
// the following stub dispatch calls:
//
// (1) isCallRelativeIndirect:
// call dword ptr [rel32] ; FF 15 ---rel32----
// (2) isCallRelative:
// call abc ; E8 ---rel32----
// (3) isCallRegisterIndirect:
// 3-byte nop ;
// call dword ptr [eax] ; FF 10
//
// NOTE: You must be sure that pRetAddr is a true return address for
// a stub dispatch call.
BOOL isCallRelativeIndirect(const BYTE *pRetAddr);
BOOL isCallRelative(const BYTE *pRetAddr);
BOOL isCallRegisterIndirect(const BYTE *pRetAddr);
inline BOOL isCallRelativeIndirect(const BYTE *pRetAddr)
{
LIMITED_METHOD_CONTRACT;
BOOL fRet = (GetUnpatchedOpcodeWORD(&pRetAddr[-6]) == X86_INSTR_CALL_IND);
_ASSERTE(!fRet || !isCallRelative(pRetAddr));
_ASSERTE(!fRet || !isCallRegisterIndirect(pRetAddr));
return fRet;
}
inline BOOL isCallRelative(const BYTE *pRetAddr)
{
LIMITED_METHOD_CONTRACT;
BOOL fRet = (GetUnpatchedOpcodeBYTE(&pRetAddr[-5]) == X86_INSTR_CALL_REL32);
_ASSERTE(!fRet || !isCallRelativeIndirect(pRetAddr));
_ASSERTE(!fRet || !isCallRegisterIndirect(pRetAddr));
return fRet;
}
inline BOOL isCallRegisterIndirect(const BYTE *pRetAddr)
{
LIMITED_METHOD_CONTRACT;
BOOL fRet = (GetUnpatchedOpcodeWORD(&pRetAddr[-5]) == X86_INSTR_NOP3_1)
&& (GetUnpatchedOpcodeBYTE(&pRetAddr[-3]) == X86_INSTR_NOP3_3)
&& (GetUnpatchedOpcodeWORD(&pRetAddr[-2]) == X86_INSTR_CALL_IND_EAX);
_ASSERTE(!fRet || !isCallRelative(pRetAddr));
_ASSERTE(!fRet || !isCallRelativeIndirect(pRetAddr));
return fRet;
}
//------------------------------------------------------------------------
inline void emitJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
pBufferRW[0] = X86_INSTR_JMP_REL32; //JUMPNEAR32
*((LPVOID*)(1+pBufferRW)) = (LPVOID) (((LPBYTE)target) - (pBufferRX+5));
}
//------------------------------------------------------------------------
inline void emitJumpInd(LPBYTE pBuffer, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
*((WORD*)pBuffer) = X86_INSTR_JMP_IND; // 0x25FF jmp dword ptr[addr32]
*((LPVOID*)(2+pBuffer)) = target;
}
//------------------------------------------------------------------------
inline PCODE isJump(PCODE pCode)
{
LIMITED_METHOD_DAC_CONTRACT;
return *PTR_BYTE(pCode) == X86_INSTR_JMP_REL32;
}
//------------------------------------------------------------------------
// Given the same pBuffer that was used by emitJump this method
// decodes the instructions and returns the jump target
inline PCODE decodeJump(PCODE pCode)
{
LIMITED_METHOD_DAC_CONTRACT;
CONSISTENCY_CHECK(*PTR_BYTE(pCode) == X86_INSTR_JMP_REL32);
return rel32Decode(pCode+1);
}
//
// On IA64 back to back jumps should be separated by a nop bundle to get
// the best performance from the hardware's branch prediction logic.
// For all other platforms back to back jumps don't require anything special
// That is why we have these two wrapper functions that call emitJump and decodeJump
//
//------------------------------------------------------------------------
inline void emitBackToBackJump(LPBYTE pBufferRX, LPBYTE pBufferRW, LPVOID target)
{
WRAPPER_NO_CONTRACT;
emitJump(pBufferRX, pBufferRW, target);
}
//------------------------------------------------------------------------
inline PCODE isBackToBackJump(PCODE pBuffer)
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
return isJump(pBuffer);
}
//------------------------------------------------------------------------
inline PCODE decodeBackToBackJump(PCODE pBuffer)
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
return decodeJump(pBuffer);
}
EXTERN_C void __stdcall setFPReturn(int fpSize, INT64 retVal);
EXTERN_C void __stdcall getFPReturn(int fpSize, INT64 *pretval);
// SEH info forward declarations
#include <pshpack1.h>
struct DECLSPEC_ALIGN(4) UMEntryThunkCode
{
BYTE m_alignpad[2]; // used to guarantee alignment of backpatched portion
BYTE m_movEAX; //MOV EAX,imm32
LPVOID m_uet; // pointer to start of this structure
BYTE m_jmp; //JMP NEAR32
const BYTE * m_execstub; // pointer to destination code // make sure the backpatched portion is dword aligned.
void Encode(UMEntryThunkCode *pEntryThunkCodeRX, BYTE* pTargetCode, void* pvSecretParam);
void Poison();
LPCBYTE GetEntryPoint() const
{
LIMITED_METHOD_CONTRACT;
return (LPCBYTE)&m_movEAX;
}
static int GetEntryPointOffset()
{
LIMITED_METHOD_CONTRACT;
return 2;
}
};
#include <poppack.h>
struct HijackArgs
{
DWORD FPUState[3]; // 12 bytes for FPU state (10 bytes for FP top-of-stack + 2 bytes padding)
DWORD Edi;
DWORD Esi;
DWORD Ebx;
DWORD Edx;
DWORD Ecx;
union
{
DWORD Eax;
size_t ReturnValue[1];
};
DWORD Ebp;
union
{
DWORD Eip;
size_t ReturnAddress;
};
};
// ClrFlushInstructionCache is used when we want to call FlushInstructionCache
// for a specific architecture in the common code, but not for other architectures.
// On IA64 ClrFlushInstructionCache calls the Kernel FlushInstructionCache function
// to flush the instruction cache.
// We call ClrFlushInstructionCache whenever we create or modify code in the heap.
// Currently ClrFlushInstructionCache has no effect on X86
//
inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
{
// FlushInstructionCache(GetCurrentProcess(), pCodeAddr, sizeOfCode);
MemoryBarrier();
return TRUE;
}
//
// JIT HELPER ALIASING FOR PORTABILITY.
//
// Create alias for optimized implementations of helpers provided on this platform
//
// optimized static helpers generated dynamically at runtime
// #define JIT_GetSharedGCStaticBase
// #define JIT_GetSharedNonGCStaticBase
// #define JIT_GetSharedGCStaticBaseNoCtor
// #define JIT_GetSharedNonGCStaticBaseNoCtor
#ifndef TARGET_UNIX
#define JIT_NewCrossContext JIT_NewCrossContext
#endif // TARGET_UNIX
#endif // __cgenx86_h__
| -1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/pal/tests/palsuite/exception_handling/PAL_TRY_EXCEPT/test1/PAL_TRY_EXCEPT.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: PAL_TRY_EXCEPT.c (test 1)
**
** Purpose: Tests the PAL implementation of the PAL_TRY and
** PAL_EXCEPT functions. An exception is forced to ensure
** the exception block is hit.
**
**
**===================================================================*/
#include <palsuite.h>
PALTEST(exception_handling_PAL_TRY_EXCEPT_test1_paltest_pal_try_except_test1, "exception_handling/PAL_TRY_EXCEPT/test1/paltest_pal_try_except_test1")
{
int* p = 0x00000000; /* NULL pointer */
BOOL bTry = FALSE;
BOOL bExcept = FALSE;
if (0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
/*
** test to make sure we get into the exception block
*/
PAL_TRY
{
if (bExcept)
{
Fail("PAL_TRY_EXCEPT: ERROR -> Something weird is going on."
" PAL_EXCEPT was hit before PAL_TRY.\n");
}
bTry = TRUE; /* indicate we hit the PAL_TRY block */
*p = 13; /* causes an access violation exception */
Fail("PAL_TRY_EXCEPT: ERROR -> code was executed after the "
"access violation.\n");
}
PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
if (!bTry)
{
Fail("PAL_TRY_EXCEPT: ERROR -> Something weird is going on."
" PAL_EXCEPT was hit without PAL_TRY being hit.\n");
}
bExcept = TRUE; /* indicate we hit the PAL_EXCEPT block */
}
PAL_ENDTRY;
if (!bTry)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_TRY"
" block was not executed.\n");
}
if (!bExcept)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_EXCEPT"
" block was not executed.\n");
}
/* did we hit all the code blocks? */
if(!bTry || !bExcept)
{
Fail("");
}
PAL_Terminate();
return PASS;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: PAL_TRY_EXCEPT.c (test 1)
**
** Purpose: Tests the PAL implementation of the PAL_TRY and
** PAL_EXCEPT functions. An exception is forced to ensure
** the exception block is hit.
**
**
**===================================================================*/
#include <palsuite.h>
PALTEST(exception_handling_PAL_TRY_EXCEPT_test1_paltest_pal_try_except_test1, "exception_handling/PAL_TRY_EXCEPT/test1/paltest_pal_try_except_test1")
{
int* p = 0x00000000; /* NULL pointer */
BOOL bTry = FALSE;
BOOL bExcept = FALSE;
if (0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
/*
** test to make sure we get into the exception block
*/
PAL_TRY
{
if (bExcept)
{
Fail("PAL_TRY_EXCEPT: ERROR -> Something weird is going on."
" PAL_EXCEPT was hit before PAL_TRY.\n");
}
bTry = TRUE; /* indicate we hit the PAL_TRY block */
*p = 13; /* causes an access violation exception */
Fail("PAL_TRY_EXCEPT: ERROR -> code was executed after the "
"access violation.\n");
}
PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
{
if (!bTry)
{
Fail("PAL_TRY_EXCEPT: ERROR -> Something weird is going on."
" PAL_EXCEPT was hit without PAL_TRY being hit.\n");
}
bExcept = TRUE; /* indicate we hit the PAL_EXCEPT block */
}
PAL_ENDTRY;
if (!bTry)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_TRY"
" block was not executed.\n");
}
if (!bExcept)
{
Trace("PAL_TRY_EXCEPT: ERROR -> It appears the code in the PAL_EXCEPT"
" block was not executed.\n");
}
/* did we hit all the code blocks? */
if(!bTry || !bExcept)
{
Fail("");
}
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/tools/superpmi/superpmi-shared/methodcontextreader.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// MethodContextReader.h - Abstraction for reading MethodContexts
// Should eventually support multithreading
//----------------------------------------------------------
#ifndef _MethodContextReader
#define _MethodContextReader
#include "methodcontext.h"
#include "tocfile.h"
struct MethodContextBuffer
{
private:
static const int Completed = 0x1234abcd;
public:
unsigned char* buff;
DWORD size;
MethodContextBuffer() : buff(nullptr), size(Completed)
{
}
MethodContextBuffer(DWORD error) : buff(nullptr), size(error)
{
}
MethodContextBuffer(unsigned char* b, DWORD e) : buff(b), size(e)
{
}
bool allDone()
{
return size == Completed && buff == nullptr;
}
bool Error()
{
return size != 0 && size != Completed && buff == nullptr;
}
};
// The pack(4) directive is so that each entry is 12 bytes, instead of 16
#pragma pack(push)
#pragma pack(4)
class MethodContextReader
{
private:
// The MC/MCH file
HANDLE fileHandle;
// The size of the MC/MCH file
__int64 fileSize;
// Current MC index in the input MC/MCH file
int curMCIndex;
// The synchronization mutex
HANDLE mutex;
bool AcquireLock();
void ReleaseLock();
TOCFile tocFile;
// Method ranges to process
// If you have an index file, these things get processed
// much faster, now
const int* Indexes;
int IndexCount;
int curIndexPos;
// Method hash to process
// If you have an index file, these things get processed
// much faster, now
char* Hash;
int curTOCIndex;
// Offset/increment if running in parallel mode
// If you have an index file, these things get processed
// much faster, now
int Offset;
int Increment;
struct StringList
{
StringList* next;
std::string hash;
};
StringList* excludedMethodsList;
// Binary search to get this method number from the index
// Returns -1 for not found, or -2 for not indexed
__int64 GetOffset(unsigned int methodNumber);
// Just a helper...
static HANDLE OpenFile(const char* inputFile, DWORD flags = FILE_ATTRIBUTE_NORMAL);
MethodContextBuffer ReadMethodContextNoLock(bool justSkip = false);
MethodContextBuffer ReadMethodContext(bool acquireLock, bool justSkip = false);
MethodContextBuffer GetSpecificMethodContext(unsigned int methodNumber);
MethodContextBuffer GetNextMethodContextFromIndexes();
MethodContextBuffer GetNextMethodContextFromHash();
MethodContextBuffer GetNextMethodContextFromOffsetIncrement();
MethodContextBuffer GetNextMethodContextHelper();
// Looks for a file named foo.origSuffix.newSuffix or foo.newSuffix
// but only if foo.origSuffix exists
static std::string CheckForPairedFile(const std::string& fileName,
const std::string& origSuffix,
const std::string& newSuffix);
    // are we at the end of the file...
bool atEof();
// Do we have a valid TOC?
bool hasTOC();
// Do we have a valid index?
bool hasIndex();
void ReadExcludedMethods(std::string mchFileName);
void CleanExcludedMethods();
public:
MethodContextReader(const char* inputFileName,
const int* indexes = nullptr,
int indexCount = -1,
char* hash = nullptr,
int offset = -1,
int increment = -1);
~MethodContextReader();
// Read a method context buffer from the ContextCollection
// (either a hive [single] or an index)
MethodContextBuffer GetNextMethodContext();
// No C++ exceptions, so the constructor has to always succeed...
bool isValid();
double PercentComplete();
// Returns the index of the last MethodContext read by GetNextMethodContext
inline int GetMethodContextIndex()
{
return curMCIndex;
}
// Return should this method context be excluded from the replay or not.
bool IsMethodExcluded(MethodContext* mc);
};
#pragma pack(pop)
#endif
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
//----------------------------------------------------------
// MethodContextReader.h - Abstraction for reading MethodContexts
// Should eventually support multithreading
//----------------------------------------------------------
#ifndef _MethodContextReader
#define _MethodContextReader
#include "methodcontext.h"
#include "tocfile.h"
struct MethodContextBuffer
{
private:
static const int Completed = 0x1234abcd;
public:
unsigned char* buff;
DWORD size;
MethodContextBuffer() : buff(nullptr), size(Completed)
{
}
MethodContextBuffer(DWORD error) : buff(nullptr), size(error)
{
}
MethodContextBuffer(unsigned char* b, DWORD e) : buff(b), size(e)
{
}
bool allDone()
{
return size == Completed && buff == nullptr;
}
bool Error()
{
return size != 0 && size != Completed && buff == nullptr;
}
};
// The pack(4) directive is so that each entry is 12 bytes, instead of 16
#pragma pack(push)
#pragma pack(4)
class MethodContextReader
{
private:
// The MC/MCH file
HANDLE fileHandle;
// The size of the MC/MCH file
__int64 fileSize;
// Current MC index in the input MC/MCH file
int curMCIndex;
// The synchronization mutex
HANDLE mutex;
bool AcquireLock();
void ReleaseLock();
TOCFile tocFile;
// Method ranges to process
// If you have an index file, these things get processed
// much faster, now
const int* Indexes;
int IndexCount;
int curIndexPos;
// Method hash to process
// If you have an index file, these things get processed
// much faster, now
char* Hash;
int curTOCIndex;
// Offset/increment if running in parallel mode
// If you have an index file, these things get processed
// much faster, now
int Offset;
int Increment;
struct StringList
{
StringList* next;
std::string hash;
};
StringList* excludedMethodsList;
// Binary search to get this method number from the index
// Returns -1 for not found, or -2 for not indexed
__int64 GetOffset(unsigned int methodNumber);
// Just a helper...
static HANDLE OpenFile(const char* inputFile, DWORD flags = FILE_ATTRIBUTE_NORMAL);
MethodContextBuffer ReadMethodContextNoLock(bool justSkip = false);
MethodContextBuffer ReadMethodContext(bool acquireLock, bool justSkip = false);
MethodContextBuffer GetSpecificMethodContext(unsigned int methodNumber);
MethodContextBuffer GetNextMethodContextFromIndexes();
MethodContextBuffer GetNextMethodContextFromHash();
MethodContextBuffer GetNextMethodContextFromOffsetIncrement();
MethodContextBuffer GetNextMethodContextHelper();
// Looks for a file named foo.origSuffix.newSuffix or foo.newSuffix
// but only if foo.origSuffix exists
static std::string CheckForPairedFile(const std::string& fileName,
const std::string& origSuffix,
const std::string& newSuffix);
    // are we at the end of the file...
bool atEof();
// Do we have a valid TOC?
bool hasTOC();
// Do we have a valid index?
bool hasIndex();
void ReadExcludedMethods(std::string mchFileName);
void CleanExcludedMethods();
public:
MethodContextReader(const char* inputFileName,
const int* indexes = nullptr,
int indexCount = -1,
char* hash = nullptr,
int offset = -1,
int increment = -1);
~MethodContextReader();
// Read a method context buffer from the ContextCollection
// (either a hive [single] or an index)
MethodContextBuffer GetNextMethodContext();
// No C++ exceptions, so the constructor has to always succeed...
bool isValid();
double PercentComplete();
// Returns the index of the last MethodContext read by GetNextMethodContext
inline int GetMethodContextIndex()
{
return curMCIndex;
}
// Return should this method context be excluded from the replay or not.
bool IsMethodExcluded(MethodContext* mc);
};
#pragma pack(pop)
#endif
| -1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/pal/tests/palsuite/c_runtime/vswprintf/test15/test15.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test15.c
**
** Purpose: Test #15 for the vswprintf function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../vswprintf.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
PALTEST(c_runtime_vswprintf_test15_paltest_vswprintf_test15, "c_runtime/vswprintf/test15/paltest_vswprintf_test15")
{
double val = 256.0;
double neg = -256.0;
if (PAL_Initialize(argc, argv) != 0)
return(FAIL);
DoDoubleTest(convert("foo %E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %lE"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %hE"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %LE"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %I64E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %14E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %-14E"), val, convert("foo 2.560000E+002 "),
convert("foo 2.560000E+02 "));
DoDoubleTest(convert("foo %.1E"), val, convert("foo 2.6E+002"),
convert("foo 2.6E+02"));
DoDoubleTest(convert("foo %.8E"), val, convert("foo 2.56000000E+002"),
convert("foo 2.56000000E+02"));
DoDoubleTest(convert("foo %014E"), val, convert("foo 02.560000E+002"),
convert("foo 002.560000E+02"));
DoDoubleTest(convert("foo %#E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %+E"), val, convert("foo +2.560000E+002"),
convert("foo +2.560000E+02"));
DoDoubleTest(convert("foo % E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %+E"), neg, convert("foo -2.560000E+002"),
convert("foo -2.560000E+02"));
DoDoubleTest(convert("foo % E"), neg, convert("foo -2.560000E+002"),
convert("foo -2.560000E+002"));
PAL_Terminate();
return PASS;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=====================================================================
**
** Source: test15.c
**
** Purpose: Test #15 for the vswprintf function.
**
**
**===================================================================*/
#include <palsuite.h>
#include "../vswprintf.h"
/* memcmp is used to verify the results, so this test is dependent on it. */
/* ditto with wcslen */
PALTEST(c_runtime_vswprintf_test15_paltest_vswprintf_test15, "c_runtime/vswprintf/test15/paltest_vswprintf_test15")
{
double val = 256.0;
double neg = -256.0;
if (PAL_Initialize(argc, argv) != 0)
return(FAIL);
DoDoubleTest(convert("foo %E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %lE"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %hE"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %LE"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %I64E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %14E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %-14E"), val, convert("foo 2.560000E+002 "),
convert("foo 2.560000E+02 "));
DoDoubleTest(convert("foo %.1E"), val, convert("foo 2.6E+002"),
convert("foo 2.6E+02"));
DoDoubleTest(convert("foo %.8E"), val, convert("foo 2.56000000E+002"),
convert("foo 2.56000000E+02"));
DoDoubleTest(convert("foo %014E"), val, convert("foo 02.560000E+002"),
convert("foo 002.560000E+02"));
DoDoubleTest(convert("foo %#E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %+E"), val, convert("foo +2.560000E+002"),
convert("foo +2.560000E+02"));
DoDoubleTest(convert("foo % E"), val, convert("foo 2.560000E+002"),
convert("foo 2.560000E+02"));
DoDoubleTest(convert("foo %+E"), neg, convert("foo -2.560000E+002"),
convert("foo -2.560000E+02"));
DoDoubleTest(convert("foo % E"), neg, convert("foo -2.560000E+002"),
convert("foo -2.560000E+002"));
PAL_Terminate();
return PASS;
}
| -1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/pal/tests/palsuite/debug_api/WriteProcessMemory/test3/test3.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: test3.c
**
** Purpose: Create a child process and debug it. When the child
** raises an exception, it sends back a memory location. Call
** WriteProcessMemory on the memory location, but attempt to write
** more than the memory allows. This should cause an error and the
** data should be unchanged.
**
**
==============================================================*/
#define UNICODE
#include "commonconsts.h"
#include <palsuite.h>
PALTEST(debug_api_WriteProcessMemory_test3_paltest_writeprocessmemory_test3, "debug_api/WriteProcessMemory/test3/paltest_writeprocessmemory_test3")
{
PROCESS_INFORMATION pi;
STARTUPINFO si;
HANDLE hEvToHelper;
HANDLE hEvFromHelper;
DWORD dwExitCode;
DWORD dwRet;
BOOL success = TRUE; /* assume success */
char cmdComposeBuf[MAX_PATH];
PWCHAR uniString;
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
/* Create the signals we need for cross process communication */
hEvToHelper = CreateEvent(NULL, TRUE, FALSE, szcToHelperEvName);
if (!hEvToHelper)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"GetLastError() returned %u.\n", szcToHelperEvName,
GetLastError());
}
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"(already exists!)\n", szcToHelperEvName);
}
hEvFromHelper = CreateEvent(NULL, TRUE, FALSE, szcFromHelperEvName);
if (!hEvToHelper)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"GetLastError() returned %u.\n", szcFromHelperEvName,
GetLastError());
}
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"(already exists!)\n", szcFromHelperEvName);
}
if (!sprintf_s(cmdComposeBuf, ARRAY_SIZE(cmdComposeBuf), "helper %s", commsFileName))
{
Fail("Could not convert command line\n");
}
uniString = convert(cmdComposeBuf);
ZeroMemory( &si, sizeof(si) );
si.cb = sizeof(si);
ZeroMemory( &pi, sizeof(pi) );
/* Create a new process. This is the process that will ask for
* memory munging */
if(!CreateProcess( NULL, uniString, NULL, NULL,
FALSE, 0, NULL, NULL, &si, &pi))
{
Trace("ERROR: CreateProcess failed to load executable '%S'. "
"GetLastError() returned %u.\n",
uniString, GetLastError());
free(uniString);
Fail("");
}
free(uniString);
while(1)
{
FILE *commsFile;
char* pSrcMemory;
char* pDestMemory;
int Count;
SIZE_T wpmCount;
DWORD dwExpectedErrorCode;
char incomingCMDBuffer[MAX_PATH + 1];
/* wait until the helper tells us that it has given us
* something to do */
dwRet = WaitForSingleObject(hEvFromHelper, TIMEOUT);
if (dwRet != WAIT_OBJECT_0)
{
Trace("test1 WaitForSingleObjectTest: WaitForSingleObject "
"failed (%u)\n", GetLastError());
break; /* no more work incoming */
}
/* get the parameters to test WriteProcessMemory with */
if (!(commsFile = fopen(commsFileName, "r")))
{
/* no file means there is no more work */
break;
}
if ( NULL == fgets(incomingCMDBuffer, MAX_PATH, commsFile))
{
Trace ("unable to read from communication file %s "
"for reasons %u & %u\n",
errno, GetLastError());
success = FALSE;
PEDANTIC1(fclose,(commsFile));
/* it's not worth continuing this trial */
goto doneIteration;
}
PEDANTIC1(fclose,(commsFile));
sscanf(incomingCMDBuffer, "%u %u %u",
&pDestMemory, &Count, &dwExpectedErrorCode);
if (argc > 1)
{
Trace("Preparing to write to %u bytes @ %u ('%s')\n",
Count, pDestMemory, incomingCMDBuffer);
}
/* compose some data to write to the client process */
if (!(pSrcMemory = (char*)malloc(Count)))
{
Trace("could not dynamically allocate memory to copy from "
"for reasons %u & %u\n",
errno, GetLastError());
success = FALSE;
goto doneIteration;
}
memset(pSrcMemory, nextValue, Count);
/* do the work */
dwRet = WriteProcessMemory(pi.hProcess,
pDestMemory,
pSrcMemory,
Count,
&wpmCount);
if(dwRet != 0)
{
Trace("ERROR: Situation: '%s', return code: %u, bytes 'written': %u\n",
incomingCMDBuffer, dwRet, wpmCount);
Trace("ERROR: WriteProcessMemory did not fail as it should, as "
"it attempted to write to a range of memory which was "
"not completely accessible.\n");
success = FALSE;
}
if(GetLastError() != dwExpectedErrorCode)
{
Trace("ERROR: GetLastError() should have returned "
"%u , but instead it returned %u.\n",
dwExpectedErrorCode, GetLastError());
success = FALSE;
}
free(pSrcMemory);
doneIteration:
PEDANTIC(ResetEvent, (hEvFromHelper));
PEDANTIC(SetEvent, (hEvToHelper));
}
/* wait for the child process to complete */
WaitForSingleObject ( pi.hProcess, TIMEOUT );
/* this may return a failure code on a success path */
/* check the exit code from the process */
if( ! GetExitCodeProcess( pi.hProcess, &dwExitCode ) )
{
Trace( "GetExitCodeProcess call failed with error code %u\n",
GetLastError() );
dwExitCode = FAIL;
}
if(!success)
{
dwExitCode = FAIL;
}
PEDANTIC(CloseHandle, (hEvToHelper));
PEDANTIC(CloseHandle, (hEvFromHelper));
PEDANTIC(CloseHandle, (pi.hThread));
PEDANTIC(CloseHandle, (pi.hProcess));
PAL_TerminateEx(dwExitCode);
return dwExitCode;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*=============================================================
**
** Source: test3.c
**
** Purpose: Create a child process and debug it. When the child
** raises an exception, it sends back a memory location. Call
** WriteProcessMemory on the memory location, but attempt to write
** more than the memory allows. This should cause an error and the
** data should be unchanged.
**
**
==============================================================*/
#define UNICODE
#include "commonconsts.h"
#include <palsuite.h>
PALTEST(debug_api_WriteProcessMemory_test3_paltest_writeprocessmemory_test3, "debug_api/WriteProcessMemory/test3/paltest_writeprocessmemory_test3")
{
PROCESS_INFORMATION pi;
STARTUPINFO si;
HANDLE hEvToHelper;
HANDLE hEvFromHelper;
DWORD dwExitCode;
DWORD dwRet;
BOOL success = TRUE; /* assume success */
char cmdComposeBuf[MAX_PATH];
PWCHAR uniString;
if(0 != (PAL_Initialize(argc, argv)))
{
return FAIL;
}
/* Create the signals we need for cross process communication */
hEvToHelper = CreateEvent(NULL, TRUE, FALSE, szcToHelperEvName);
if (!hEvToHelper)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"GetLastError() returned %u.\n", szcToHelperEvName,
GetLastError());
}
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"(already exists!)\n", szcToHelperEvName);
}
hEvFromHelper = CreateEvent(NULL, TRUE, FALSE, szcFromHelperEvName);
if (!hEvToHelper)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"GetLastError() returned %u.\n", szcFromHelperEvName,
GetLastError());
}
if (GetLastError() == ERROR_ALREADY_EXISTS)
{
Fail("WriteProcessMemory: CreateEvent of '%S' failed. "
"(already exists!)\n", szcFromHelperEvName);
}
if (!sprintf_s(cmdComposeBuf, ARRAY_SIZE(cmdComposeBuf), "helper %s", commsFileName))
{
Fail("Could not convert command line\n");
}
uniString = convert(cmdComposeBuf);
ZeroMemory( &si, sizeof(si) );
si.cb = sizeof(si);
ZeroMemory( &pi, sizeof(pi) );
/* Create a new process. This is the process that will ask for
* memory munging */
if(!CreateProcess( NULL, uniString, NULL, NULL,
FALSE, 0, NULL, NULL, &si, &pi))
{
Trace("ERROR: CreateProcess failed to load executable '%S'. "
"GetLastError() returned %u.\n",
uniString, GetLastError());
free(uniString);
Fail("");
}
free(uniString);
while(1)
{
FILE *commsFile;
char* pSrcMemory;
char* pDestMemory;
int Count;
SIZE_T wpmCount;
DWORD dwExpectedErrorCode;
char incomingCMDBuffer[MAX_PATH + 1];
/* wait until the helper tells us that it has given us
* something to do */
dwRet = WaitForSingleObject(hEvFromHelper, TIMEOUT);
if (dwRet != WAIT_OBJECT_0)
{
Trace("test1 WaitForSingleObjectTest: WaitForSingleObject "
"failed (%u)\n", GetLastError());
break; /* no more work incoming */
}
/* get the parameters to test WriteProcessMemory with */
if (!(commsFile = fopen(commsFileName, "r")))
{
/* no file means there is no more work */
break;
}
if ( NULL == fgets(incomingCMDBuffer, MAX_PATH, commsFile))
{
Trace ("unable to read from communication file %s "
"for reasons %u & %u\n",
errno, GetLastError());
success = FALSE;
PEDANTIC1(fclose,(commsFile));
/* it's not worth continuing this trial */
goto doneIteration;
}
PEDANTIC1(fclose,(commsFile));
sscanf(incomingCMDBuffer, "%u %u %u",
&pDestMemory, &Count, &dwExpectedErrorCode);
if (argc > 1)
{
Trace("Preparing to write to %u bytes @ %u ('%s')\n",
Count, pDestMemory, incomingCMDBuffer);
}
/* compose some data to write to the client process */
if (!(pSrcMemory = (char*)malloc(Count)))
{
Trace("could not dynamically allocate memory to copy from "
"for reasons %u & %u\n",
errno, GetLastError());
success = FALSE;
goto doneIteration;
}
memset(pSrcMemory, nextValue, Count);
/* do the work */
dwRet = WriteProcessMemory(pi.hProcess,
pDestMemory,
pSrcMemory,
Count,
&wpmCount);
if(dwRet != 0)
{
Trace("ERROR: Situation: '%s', return code: %u, bytes 'written': %u\n",
incomingCMDBuffer, dwRet, wpmCount);
Trace("ERROR: WriteProcessMemory did not fail as it should, as "
"it attempted to write to a range of memory which was "
"not completely accessible.\n");
success = FALSE;
}
if(GetLastError() != dwExpectedErrorCode)
{
Trace("ERROR: GetLastError() should have returned "
"%u , but instead it returned %u.\n",
dwExpectedErrorCode, GetLastError());
success = FALSE;
}
free(pSrcMemory);
doneIteration:
PEDANTIC(ResetEvent, (hEvFromHelper));
PEDANTIC(SetEvent, (hEvToHelper));
}
/* wait for the child process to complete */
WaitForSingleObject ( pi.hProcess, TIMEOUT );
/* this may return a failure code on a success path */
/* check the exit code from the process */
if( ! GetExitCodeProcess( pi.hProcess, &dwExitCode ) )
{
Trace( "GetExitCodeProcess call failed with error code %u\n",
GetLastError() );
dwExitCode = FAIL;
}
if(!success)
{
dwExitCode = FAIL;
}
PEDANTIC(CloseHandle, (hEvToHelper));
PEDANTIC(CloseHandle, (hEvFromHelper));
PEDANTIC(CloseHandle, (pi.hThread));
PEDANTIC(CloseHandle, (pi.hProcess));
PAL_TerminateEx(dwExitCode);
return dwExitCode;
}
| -1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/inc/stackframe.h
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __STACKFRAME_H
#define __STACKFRAME_H
#include "regdisp.h"
struct StackFrame
{
const static UINT_PTR maxVal = (UINT_PTR)(INT_PTR)-1;
StackFrame() : SP(NULL)
{
}
StackFrame(UINT_PTR sp)
{
SP = sp;
}
void Clear()
{
SP = NULL;
}
void SetMaxVal()
{
SP = maxVal;
}
bool IsNull()
{
return (SP == NULL);
}
bool IsMaxVal()
{
return (SP == maxVal);
}
bool operator==(StackFrame sf)
{
return (SP == sf.SP);
}
bool operator!=(StackFrame sf)
{
return (SP != sf.SP);
}
bool operator<(StackFrame sf)
{
return (SP < sf.SP);
}
bool operator<=(StackFrame sf)
{
return (SP <= sf.SP);
}
bool operator>(StackFrame sf)
{
return (SP > sf.SP);
}
bool operator>=(StackFrame sf)
{
return (SP >= sf.SP);
}
static inline StackFrame FromEstablisherFrame(UINT_PTR EstablisherFrame)
{
return StackFrame(EstablisherFrame);
}
static inline StackFrame FromRegDisplay(REGDISPLAY* pRD)
{
return StackFrame(GetRegdisplaySP(pRD));
}
UINT_PTR SP;
};
//---------------------------------------------------------------------------------------
//
// On WIN64, all the stack range tracking done by the Exception Handling (EH) subsystem is based on the
// establisher frame given by the OS. On IA64, the establisher frame is the caller SP and the current BSP.
// On X64, it is the initial SP before any dynamic stack allocation, i.e. it is the SP when a function exits
// the prolog. The EH subsystem uses the same format.
//
// The stackwalker needs to get information from the EH subsystem in order to skip funclets. Unfortunately,
// stackwalking is based on the current SP, i.e. the SP when the control flow leaves a function via a
// function call. Thus, for stack frames with dynamic stack allocations on X64, the SP values used by the
// stackwalker and the EH subsystem don't match.
//
// To work around this problem, we need to somehow bridge the different SP values. We do so by using the
// caller SP instead of the current SP for comparisons during a stackwalk on X64. Creating a new type
// explicitly spells out the important distinction that this is NOT in the same format as the
// OS establisher frame.
//
// Notes:
// In the long term, we should look at merging the two SP formats and have one consistent abstraction.
//
struct CallerStackFrame : StackFrame
{
CallerStackFrame() : StackFrame()
{
}
CallerStackFrame(UINT_PTR sp) : StackFrame(sp)
{
}
#ifdef FEATURE_EH_FUNCLETS
static inline CallerStackFrame FromRegDisplay(REGDISPLAY* pRD)
{
_ASSERTE(pRD->IsCallerSPValid || pRD->IsCallerContextValid);
return CallerStackFrame(GetSP(pRD->pCallerContext));
}
#endif // FEATURE_EH_FUNCLETS
};
#endif // __STACKFRAME_H
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef __STACKFRAME_H
#define __STACKFRAME_H
#include "regdisp.h"
struct StackFrame
{
const static UINT_PTR maxVal = (UINT_PTR)(INT_PTR)-1;
StackFrame() : SP(NULL)
{
}
StackFrame(UINT_PTR sp)
{
SP = sp;
}
void Clear()
{
SP = NULL;
}
void SetMaxVal()
{
SP = maxVal;
}
bool IsNull()
{
return (SP == NULL);
}
bool IsMaxVal()
{
return (SP == maxVal);
}
bool operator==(StackFrame sf)
{
return (SP == sf.SP);
}
bool operator!=(StackFrame sf)
{
return (SP != sf.SP);
}
bool operator<(StackFrame sf)
{
return (SP < sf.SP);
}
bool operator<=(StackFrame sf)
{
return (SP <= sf.SP);
}
bool operator>(StackFrame sf)
{
return (SP > sf.SP);
}
bool operator>=(StackFrame sf)
{
return (SP >= sf.SP);
}
static inline StackFrame FromEstablisherFrame(UINT_PTR EstablisherFrame)
{
return StackFrame(EstablisherFrame);
}
static inline StackFrame FromRegDisplay(REGDISPLAY* pRD)
{
return StackFrame(GetRegdisplaySP(pRD));
}
UINT_PTR SP;
};
//---------------------------------------------------------------------------------------
//
// On WIN64, all the stack range tracking done by the Exception Handling (EH) subsystem is based on the
// establisher frame given by the OS. On IA64, the establisher frame is the caller SP and the current BSP.
// On X64, it is the initial SP before any dynamic stack allocation, i.e. it is the SP when a function exits
// the prolog. The EH subsystem uses the same format.
//
// The stackwalker needs to get information from the EH subsystem in order to skip funclets. Unfortunately,
// stackwalking is based on the current SP, i.e. the SP when the control flow leaves a function via a
// function call. Thus, for stack frames with dynamic stack allocations on X64, the SP values used by the
// stackwalker and the EH subsystem don't match.
//
// To work around this problem, we need to somehow bridge the different SP values. We do so by using the
// caller SP instead of the current SP for comparisons during a stackwalk on X64. Creating a new type
// explicitly spells out the important distinction that this is NOT in the same format as the
// OS establisher frame.
//
// Notes:
// In the long term, we should look at merging the two SP formats and have one consistent abstraction.
//
struct CallerStackFrame : StackFrame
{
CallerStackFrame() : StackFrame()
{
}
CallerStackFrame(UINT_PTR sp) : StackFrame(sp)
{
}
#ifdef FEATURE_EH_FUNCLETS
static inline CallerStackFrame FromRegDisplay(REGDISPLAY* pRD)
{
_ASSERTE(pRD->IsCallerSPValid || pRD->IsCallerContextValid);
return CallerStackFrame(GetSP(pRD->pCallerContext));
}
#endif // FEATURE_EH_FUNCLETS
};
#endif // __STACKFRAME_H
| -1 |
dotnet/runtime
| 66,407 |
ARM64 - Optimizing a % b operations part 2
|
Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
TIHan
| 2022-03-09T20:33:02Z | 2022-04-13T20:01:26Z |
3b6a539c7219383ec6181a112465f904fe9e2edb
|
a81f4bc9d53dedefe03f49da53ed198abbf9c467
|
ARM64 - Optimizing a % b operations part 2. Addressing part of this issue: https://github.com/dotnet/runtime/issues/34937
**Description**
There are various ways to optimize `%` for integers on ARM64.
`a % b` can be transformed into a specific sequence of instructions for ARM64 if the operation is signed and `b` is a constant with the power of 2.
**Acceptance Criteria**
- [x] Merge https://github.com/dotnet/runtime/pull/65535
- [x] Add Tests
ARM64 diffs based on tests
```diff
- asr w1, w0, #31
- and w1, w1, #15
- add w1, w1, w0
- asr w1, w1, #4
- lsl w1, w1, #4
- sub w0, w0, w1
- ;; bbWeight=1 PerfScore 4.50
+ and w1, w0, #15
+ negs w0, w0
+ and w0, w0, #15
+ csneg w0, w1, w0, mi
+ ;; bbWeight=1 PerfScore 2.00
```
|
./src/coreclr/pal/tests/palsuite/c_runtime/wcslen/test1/test1.cpp
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose:
** Tests that wcslen correctly returns the length (in wide characters,
** not byte) of a wide string
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(c_runtime_wcslen_test1_paltest_wcslen_test1, "c_runtime/wcslen/test1/paltest_wcslen_test1")
{
WCHAR str1[] = {'f','o','o',' ',0};
WCHAR str2[] = {0};
int ret;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = wcslen(str1);
if (ret != 4)
{
Fail("ERROR: Expected wcslen of \"foo \" to be 4, got %d\n", ret);
}
ret = wcslen(str2);
if (ret != 0)
{
Fail("ERROR: Expected wcslen of \"\" to be 0, got %d\n", ret);
}
PAL_Terminate();
return PASS;
}
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/*============================================================================
**
** Source: test1.c
**
** Purpose:
** Tests that wcslen correctly returns the length (in wide characters,
** not byte) of a wide string
**
**
**==========================================================================*/
#include <palsuite.h>
PALTEST(c_runtime_wcslen_test1_paltest_wcslen_test1, "c_runtime/wcslen/test1/paltest_wcslen_test1")
{
WCHAR str1[] = {'f','o','o',' ',0};
WCHAR str2[] = {0};
int ret;
if (PAL_Initialize(argc, argv))
{
return FAIL;
}
ret = wcslen(str1);
if (ret != 4)
{
Fail("ERROR: Expected wcslen of \"foo \" to be 4, got %d\n", ret);
}
ret = wcslen(str2);
if (ret != 0)
{
Fail("ERROR: Expected wcslen of \"\" to be 0, got %d\n", ret);
}
PAL_Terminate();
return PASS;
}
| -1 |